Package gluon :: Module dal
[hide private]
[frames] | [no frames]

Source Code for Module gluon.dal

    1  #!/bin/env python 
    2  # -*- coding: utf-8 -*- 
    3   
    4  """ 
    5  This file is part of the web2py Web Framework 
    6  Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> 
    7  License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) 
    8   
    9  Thanks to 
   10      * Niall Sweeny <niall.sweeny@fonjax.com> for MS SQL support 
   11      * Marcel Leuthi <mluethi@mlsystems.ch> for Oracle support 
   12      * Denes 
   13      * Chris Clark 
   14      * clach05 
   15      * Denes Lengyel 
   16      * and many others who have contributed to current and previous versions 
   17   
   18  This file contains the DAL support for many relational databases, 
   19  including: 
   20  - SQLite & SpatiaLite 
   21  - MySQL 
   22  - Postgres 
   23  - Firebird 
   24  - Oracle 
   25  - MS SQL 
   26  - DB2 
   27  - Interbase 
   28  - Ingres 
   29  - Informix (9+ and SE) 
   30  - SapDB (experimental) 
   31  - Cubrid (experimental) 
   32  - CouchDB (experimental) 
   33  - MongoDB (in progress) 
   34  - Google:nosql 
   35  - Google:sql 
   36  - Teradata 
   37  - IMAP (experimental) 
   38   
   39  Example of usage: 
   40   
   41  >>> # from dal import DAL, Field 
   42   
   43  ### create DAL connection (and create DB if it doesn't exist) 
   44  >>> db = DAL(('sqlite://storage.sqlite','mysql://a:b@localhost/x'), 
   45  ... folder=None) 
   46   
   47  ### define a table 'person' (create/alter as necessary) 
   48  >>> person = db.define_table('person',Field('name','string')) 
   49   
   50  ### insert a record 
   51  >>> id = person.insert(name='James') 
   52   
   53  ### retrieve it by id 
   54  >>> james = person(id) 
   55   
   56  ### retrieve it by name 
   57  >>> james = person(name='James') 
   58   
   59  ### retrieve it by arbitrary query 
   60  >>> query = (person.name=='James') & (person.name.startswith('J')) 
   61  >>> james = db(query).select(person.ALL)[0] 
   62   
   63  ### update one record 
   64  >>> james.update_record(name='Jim') 
   65  <Row {'id': 1, 'name': 'Jim'}> 
   66   
   67  ### update multiple records by query 
   68  >>> db(person.name.like('J%')).update(name='James') 
   69  1 
   70   
   71  ### delete records by query 
   72  >>> db(person.name.lower() == 'jim').delete() 
   73  0 
   74   
   75  ### retrieve multiple records (rows) 
   76  >>> people = db(person).select(orderby=person.name, 
   77  ... groupby=person.name, limitby=(0,100)) 
   78   
   79  ### further filter them 
   80  >>> james = people.find(lambda row: row.name == 'James').first() 
   81  >>> print james.id, james.name 
   82  1 James 
   83   
   84  ### check aggregates 
   85  >>> counter = person.id.count() 
   86  >>> print db(person).select(counter).first()(counter) 
   87  1 
   88   
   89  ### delete one record 
   90  >>> james.delete_record() 
   91  1 
   92   
   93  ### delete (drop) entire database table 
   94  >>> person.drop() 
   95   
   96  Supported field types: 
   97  id string text boolean integer double decimal password upload 
   98  blob time date datetime 
   99   
  100  Supported DAL URI strings: 
  101  'sqlite://test.db' 
  102  'spatialite://test.db' 
  103  'sqlite:memory' 
  104  'spatialite:memory' 
  105  'jdbc:sqlite://test.db' 
  106  'mysql://root:none@localhost/test' 
  107  'postgres://mdipierro:password@localhost/test' 
  108  'postgres:psycopg2://mdipierro:password@localhost/test' 
  109  'postgres:pg8000://mdipierro:password@localhost/test' 
  110  'jdbc:postgres://mdipierro:none@localhost/test' 
  111  'mssql://web2py:none@A64X2/web2py_test' 
  112  'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings 
  113  'oracle://username:password@database' 
  114  'firebird://user:password@server:3050/database' 
  115  'db2://DSN=dsn;UID=user;PWD=pass' 
  116  'firebird://username:password@hostname/database' 
  117  'firebird_embedded://username:password@c://path' 
  118  'informix://user:password@server:3050/database' 
  119  'informixu://user:password@server:3050/database' # unicode informix 
  120  'ingres://database'  # or use an ODBC connection string, e.g. 'ingres://dsn=dsn_name' 
  121  'google:datastore' # for google app engine datastore 
  122  'google:sql' # for google app engine with sql (mysql compatible) 
  123  'teradata://DSN=dsn;UID=user;PWD=pass; DATABASE=database' # experimental 
  124  'imap://user:password@server:port' # experimental 
  125   
  126  For more info: 
  127  help(DAL) 
  128  help(Field) 
  129  """ 
  130   
  131  ################################################################################### 
  132  # this file only exposes DAL and Field 
  133  ################################################################################### 
  134   
  135  __all__ = ['DAL', 'Field'] 
  136   
  137  MAXCHARLENGTH = 2**15 # not quite but reasonable default max char length 
  138  DEFAULTLENGTH = {'string':512, 
  139                   'password':512, 
  140                   'upload':512, 
  141                   'text':2**15, 
  142                   'blob':2**31} 
  143  TIMINGSSIZE = 100 
  144  SPATIALLIBS = { 
  145      'Windows':'libspatialite', 
  146      'Linux':'libspatialite.so', 
  147      'Darwin':'libspatialite.dylib' 
  148      } 
  149  DEFAULT_URI = 'sqlite://dummy.db' 
  150   
  151  import re 
  152  import sys 
  153  import locale 
  154  import os 
  155  import types 
  156  import datetime 
  157  import threading 
  158  import time 
  159  import csv 
  160  import cgi 
  161  import copy 
  162  import socket 
  163  import logging 
  164  import base64 
  165  import shutil 
  166  import marshal 
  167  import decimal 
  168  import struct 
  169  import urllib 
  170  import hashlib 
  171  import uuid 
  172  import glob 
  173  import traceback 
  174  import platform 
  175   
  176  PYTHON_VERSION = sys.version_info[0] 
  177  if PYTHON_VERSION == 2: 
  178      import cPickle as pickle 
  179      import cStringIO as StringIO 
  180      import copy_reg as copyreg 
  181      hashlib_md5 = hashlib.md5 
  182      bytes, unicode = str, unicode 
  183  else: 
  184      import pickle 
  185      from io import StringIO as StringIO 
  186      import copyreg 
  187      long = int 
  188      hashlib_md5 = lambda s: hashlib.md5(bytes(s,'utf8')) 
  189      bytes, unicode = bytes, str 
  190   
  191  CALLABLETYPES = (types.LambdaType, types.FunctionType, 
  192                   types.BuiltinFunctionType, 
  193                   types.MethodType, types.BuiltinMethodType) 
  194   
  195  TABLE_ARGS = set( 
  196      ('migrate','primarykey','fake_migrate','format','redefine', 
  197       'singular','plural','trigger_name','sequence_name', 
  198       'common_filter','polymodel','table_class','on_define',)) 
  199   
  200  SELECT_ARGS = set( 
  201      ('orderby', 'groupby', 'limitby','required', 'cache', 'left', 
  202       'distinct', 'having', 'join','for_update', 'processor','cacheable')) 
  203   
  204  ogetattr = object.__getattribute__ 
  205  osetattr = object.__setattr__ 
  206  exists = os.path.exists 
  207  pjoin = os.path.join 
  208   
  209  ################################################################################### 
  210  # following checks allow the use of dal without web2py, as a standalone module 
  211  ################################################################################### 
  212  try: 
  213      from utils import web2py_uuid 
  214  except (ImportError, SystemError): 
  215      import uuid 
def web2py_uuid():
    """Fallback UUID generator used when web2py's ``utils`` module is
    unavailable (stand-alone DAL usage): a random version-4 UUID string."""
    return str(uuid.uuid4())
217 218 try: 219 import portalocker 220 have_portalocker = True 221 except ImportError: 222 have_portalocker = False 223 224 try: 225 import serializers 226 have_serializers = True 227 except ImportError: 228 have_serializers = False 229 try: 230 import json as simplejson 231 except ImportError: 232 try: 233 import gluon.contrib.simplejson as simplejson 234 except ImportError: 235 simplejson = None 236 237 try: 238 import validators 239 have_validators = True 240 except (ImportError, SyntaxError): 241 have_validators = False 242 243 LOGGER = logging.getLogger("web2py.dal") 244 DEFAULT = lambda:0 245 246 GLOBAL_LOCKER = threading.RLock() 247 THREAD_LOCAL = threading.local() 248 249 # internal representation of tables with field 250 # <table>.<field>, tables and fields may only be [a-zA-Z0-9_] 251 252 REGEX_TYPE = re.compile('^([\w\_\:]+)') 253 REGEX_DBNAME = re.compile('^(\w+)(\:\w+)*') 254 REGEX_W = re.compile('^\w+$') 255 REGEX_TABLE_DOT_FIELD = re.compile('^(\w+)\.(\w+)$') 256 REGEX_UPLOAD_PATTERN = re.compile('(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)\.(?P<name>\w+)\.\w+$') 257 REGEX_CLEANUP_FN = re.compile('[\'"\s;]+') 258 REGEX_UNPACK = re.compile('(?<!\|)\|(?!\|)') 259 REGEX_PYTHON_KEYWORDS = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$') 260 REGEX_SELECT_AS_PARSER = re.compile("\s+AS\s+(\S+)") 261 REGEX_CONST_STRING = re.compile('(\"[^\"]*?\")|(\'[^\']*?\')') 262 REGEX_SEARCH_PATTERN = re.compile('^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$') 263 REGEX_SQUARE_BRACKETS = re.compile('^.+\[.+\]$') 264 REGEX_STORE_PATTERN = re.compile('\.(?P<e>\w{1,5})$') 265 REGEX_QUOTES = re.compile("'[^']*'") 266 REGEX_ALPHANUMERIC = re.compile('^[0-9a-zA-Z]\w*$') 267 REGEX_PASSWORD = re.compile('\://([^:@]*)\:') 268 REGEX_NOPASSWD = 
re.compile('(?<=\:)([^:@/]+)(?=@.+)') 269 270 # list of drivers will be built on the fly 271 # and lists only what is available 272 DRIVERS = [] 273 274 try: 275 from new import classobj 276 from google.appengine.ext import db as gae 277 from google.appengine.api import namespace_manager, rdbms 278 from google.appengine.api.datastore_types import Key ### for belongs on ID 279 from google.appengine.ext.db.polymodel import PolyModel 280 DRIVERS.append('google') 281 except ImportError: 282 pass 283 284 if not 'google' in DRIVERS: 285 286 try: 287 from pysqlite2 import dbapi2 as sqlite2 288 DRIVERS.append('SQLite(sqlite2)') 289 except ImportError: 290 LOGGER.debug('no SQLite drivers pysqlite2.dbapi2') 291 292 try: 293 from sqlite3 import dbapi2 as sqlite3 294 DRIVERS.append('SQLite(sqlite3)') 295 except ImportError: 296 LOGGER.debug('no SQLite drivers sqlite3') 297 298 try: 299 # first try contrib driver, then from site-packages (if installed) 300 try: 301 import contrib.pymysql as pymysql 302 # monkeypatch pymysql because they havent fixed the bug: 303 # https://github.com/petehunt/PyMySQL/issues/86 304 pymysql.ESCAPE_REGEX = re.compile("'") 305 pymysql.ESCAPE_MAP = {"'": "''"} 306 # end monkeypatch 307 except ImportError: 308 import pymysql 309 DRIVERS.append('MySQL(pymysql)') 310 except ImportError: 311 LOGGER.debug('no MySQL driver pymysql') 312 313 try: 314 import MySQLdb 315 DRIVERS.append('MySQL(MySQLdb)') 316 except ImportError: 317 LOGGER.debug('no MySQL driver MySQLDB') 318 319 320 try: 321 import psycopg2 322 from psycopg2.extensions import adapt as psycopg2_adapt 323 DRIVERS.append('PostgreSQL(psycopg2)') 324 except ImportError: 325 LOGGER.debug('no PostgreSQL driver psycopg2') 326 327 try: 328 # first try contrib driver, then from site-packages (if installed) 329 try: 330 import contrib.pg8000.dbapi as pg8000 331 except ImportError: 332 import pg8000.dbapi as pg8000 333 DRIVERS.append('PostgreSQL(pg8000)') 334 except ImportError: 335 LOGGER.debug('no 
PostgreSQL driver pg8000') 336 337 try: 338 import cx_Oracle 339 DRIVERS.append('Oracle(cx_Oracle)') 340 except ImportError: 341 LOGGER.debug('no Oracle driver cx_Oracle') 342 343 try: 344 try: 345 import pyodbc 346 except ImportError: 347 try: 348 import contrib.pypyodbc as pyodbc 349 except Exception, e: 350 raise ImportError(str(e)) 351 DRIVERS.append('MSSQL(pyodbc)') 352 DRIVERS.append('DB2(pyodbc)') 353 DRIVERS.append('Teradata(pyodbc)') 354 DRIVERS.append('Ingres(pyodbc)') 355 except ImportError: 356 LOGGER.debug('no MSSQL/DB2/Teradata/Ingres driver pyodbc') 357 358 try: 359 import Sybase 360 DRIVERS.append('Sybase(Sybase)') 361 except ImportError: 362 LOGGER.debug('no Sybase driver') 363 364 try: 365 import kinterbasdb 366 DRIVERS.append('Interbase(kinterbasdb)') 367 DRIVERS.append('Firebird(kinterbasdb)') 368 except ImportError: 369 LOGGER.debug('no Firebird/Interbase driver kinterbasdb') 370 371 try: 372 import fdb 373 DRIVERS.append('Firbird(fdb)') 374 except ImportError: 375 LOGGER.debug('no Firebird driver fdb') 376 ##### 377 try: 378 import firebirdsql 379 DRIVERS.append('Firebird(firebirdsql)') 380 except ImportError: 381 LOGGER.debug('no Firebird driver firebirdsql') 382 383 try: 384 import informixdb 385 DRIVERS.append('Informix(informixdb)') 386 LOGGER.warning('Informix support is experimental') 387 except ImportError: 388 LOGGER.debug('no Informix driver informixdb') 389 390 try: 391 import sapdb 392 DRIVERS.append('SQL(sapdb)') 393 LOGGER.warning('SAPDB support is experimental') 394 except ImportError: 395 LOGGER.debug('no SAP driver sapdb') 396 397 try: 398 import cubriddb 399 DRIVERS.append('Cubrid(cubriddb)') 400 LOGGER.warning('Cubrid support is experimental') 401 except ImportError: 402 LOGGER.debug('no Cubrid driver cubriddb') 403 404 try: 405 from com.ziclix.python.sql import zxJDBC 406 import java.sql 407 # Try sqlite jdbc driver from http://www.zentus.com/sqlitejdbc/ 408 from org.sqlite import JDBC # required by java.sql; ensure we have 
it 409 zxJDBC_sqlite = java.sql.DriverManager 410 DRIVERS.append('PostgreSQL(zxJDBC)') 411 DRIVERS.append('SQLite(zxJDBC)') 412 LOGGER.warning('zxJDBC support is experimental') 413 is_jdbc = True 414 except ImportError: 415 LOGGER.debug('no SQLite/PostgreSQL driver zxJDBC') 416 is_jdbc = False 417 418 try: 419 import couchdb 420 DRIVERS.append('CouchDB(couchdb)') 421 except ImportError: 422 LOGGER.debug('no Couchdb driver couchdb') 423 424 try: 425 import pymongo 426 DRIVERS.append('MongoDB(pymongo)') 427 except: 428 LOGGER.debug('no MongoDB driver pymongo') 429 430 try: 431 import imaplib 432 DRIVERS.append('IMAP(imaplib)') 433 except: 434 LOGGER.debug('no IMAP driver imaplib') 435 436 PLURALIZE_RULES = [ 437 (re.compile('child$'), re.compile('child$'), 'children'), 438 (re.compile('oot$'), re.compile('oot$'), 'eet'), 439 (re.compile('ooth$'), re.compile('ooth$'), 'eeth'), 440 (re.compile('l[eo]af$'), re.compile('l([eo])af$'), 'l\\1aves'), 441 (re.compile('sis$'), re.compile('sis$'), 'ses'), 442 (re.compile('man$'), re.compile('man$'), 'men'), 443 (re.compile('ife$'), re.compile('ife$'), 'ives'), 444 (re.compile('eau$'), re.compile('eau$'), 'eaux'), 445 (re.compile('lf$'), re.compile('lf$'), 'lves'), 446 (re.compile('[sxz]$'), re.compile('$'), 'es'), 447 (re.compile('[^aeioudgkprt]h$'), re.compile('$'), 'es'), 448 (re.compile('(qu|[^aeiou])y$'), re.compile('y$'), 'ies'), 449 (re.compile('$'), re.compile('$'), 's'), 450 ]
def pluralize(singular, rules=None):
    """
    Return the plural form of *singular* using the first matching rule.

    Each rule is a ``(search_regex, sub_regex, replacement)`` triple: the
    first rule whose ``search_regex`` matches *singular* yields
    ``sub_regex.sub(replacement, singular)``.

    FIX: the original bound the mutable module-level list PLURALIZE_RULES
    directly as the default argument, freezing it at import time and
    sharing it across calls; a ``None`` sentinel avoids both issues while
    keeping the behavior identical for all callers.

    Returns None if no rule produces a truthy result (the catch-all
    ``$ -> s`` rule in PLURALIZE_RULES normally prevents that).
    """
    if rules is None:
        rules = PLURALIZE_RULES
    for re_search, re_sub, replace in rules:
        # keep the original truthiness check: an empty substitution
        # result falls through to the next rule
        plural = re_search.search(singular) and re_sub.sub(replace, singular)
        if plural:
            return plural
def hide_password(uri):
    """Return *uri* with its password component masked as '******'
    (used when logging/echoing connection strings)."""
    masked = REGEX_NOPASSWD.sub('******', uri)
    return masked
def OR(a, b):
    """Binary helper (e.g. for reduce()) combining two query
    expressions with the overloaded ``|`` operator."""
    combined = a | b
    return combined
def AND(a, b):
    """Binary helper (e.g. for reduce()) combining two query
    expressions with the overloaded ``&`` operator."""
    combined = a & b
    return combined
def IDENTITY(x):
    """Return the argument unchanged (used as the default
    ``credential_decoder``)."""
    return x
def varquote_aux(name, quotestr='%s'):
    """Wrap *name* in *quotestr* unless it is a plain ``\\w+`` identifier,
    in which case it is returned untouched."""
    if REGEX_W.match(name):
        return name
    return quotestr % name
471 472 if 'google' in DRIVERS: 473 474 is_jdbc = False
# NOTE: in the original file this class is defined only when the GAE
# drivers are importable (inside the "if 'google' in DRIVERS:" guard).
class GAEDecimalProperty(gae.Property):
    """
    GAE decimal implementation: stores decimal.Decimal values as strings
    in the datastore, quantized to a fixed scale.
    """
    data_type = decimal.Decimal

    def __init__(self, precision, scale, **kwargs):
        """Build the quantization template from *scale*.

        ``precision`` is accepted for signature compatibility but is not
        used by this implementation.
        """
        # BUGFIX: the original called
        #     super(GAEDecimalProperty, self).__init__(self, **kwargs)
        # passing the instance a second time as the first positional
        # argument of gae.Property.__init__ (its verbose_name slot).
        # super() already binds self, so it must not be passed again.
        super(GAEDecimalProperty, self).__init__(**kwargs)
        # '1.' followed by `scale` zeros, e.g. scale=2 -> Decimal('1.00')
        self.round = decimal.Decimal('1.' + '0' * scale)

    def get_value_for_datastore(self, model_instance):
        """Serialize the Decimal to a string; None/'' are stored as None."""
        value = super(GAEDecimalProperty, self)\
            .get_value_for_datastore(model_instance)
        if value is None or value == '':
            return None
        else:
            return str(value)

    def make_value_from_datastore(self, value):
        """Rebuild a Decimal from the stored string, quantized to scale."""
        if value is None or value == '':
            return None
        else:
            return decimal.Decimal(value).quantize(self.round)

    def validate(self, value):
        """Accept Decimal instances or strings convertible to Decimal."""
        value = super(GAEDecimalProperty, self).validate(value)
        if value is None or isinstance(value, decimal.Decimal):
            return value
        elif isinstance(value, basestring):
            return decimal.Decimal(value)
        raise gae.BadValueError("Property %s must be a Decimal or string."\
                                % self.name)
###################################################################################
# class that handles connection pooling (all adapters are derived from this one)
###################################################################################

class ConnectionPool(object):
    """Mixin giving DAL adapters per-URI connection pooling."""

    POOLS = {}                      # uri -> list of idle pooled connections
    check_active_connection = True  # probe pooled connections before reuse

    @staticmethod
    def set_folder(folder):
        """Record the working folder on the current thread."""
        THREAD_LOCAL.folder = folder

    # ## this allows gluon to commit/rollback all dbs in this thread
    def close(self, action='commit', really=True):
        """Run *action* (method name or callable), then either return the
        connection to the pool or really close it."""
        if action:
            if callable(action):
                action(self)
            else:
                getattr(self, action)()
        # ## if you want pools, recycle this connection
        if self.pool_size:
            GLOBAL_LOCKER.acquire()
            idle = ConnectionPool.POOLS[self.uri]
            if len(idle) < self.pool_size:
                idle.append(self.connection)
                really = False
            GLOBAL_LOCKER.release()
        if really:
            self.close_connection()
        self.connection = None

    @staticmethod
    def close_all_instances(action):
        """ to close cleanly databases in a multithreaded environment """
        dbs = getattr(THREAD_LOCAL, 'db_instances', {}).items()
        for db_uid, db_group in dbs:
            for db in db_group:
                if hasattr(db, '_adapter'):
                    db._adapter.close(action)
        getattr(THREAD_LOCAL, 'db_instances', {}).clear()
        getattr(THREAD_LOCAL, 'db_instances_zombie', {}).clear()
        if callable(action):
            action(None)
        return

    def find_or_make_work_folder(self):
        """ this actually does not make the folder. it has to be there """
        self.folder = getattr(THREAD_LOCAL, 'folder', '')
        # folder creation is deliberately disabled ("if False and ...")
        if False and self.folder and not exists(self.folder):
            os.mkdir(self.folder)

    def after_connection_hook(self):
        """hook for the after_connection parameter"""
        if callable(self._after_connection):
            self._after_connection(self)
        self.after_connection()

    def after_connection(self):
        """ this it is supposed to be overloaded by adapters"""
        pass

    def reconnect(self, f=None, cursor=True):
        """
        this function defines: self.connection and self.cursor
        (iff cursor is True)
        if self.pool_size>0 it will try pull the connection from the pool
        if the connection is not active (closed by db server) it will loop
        if not self.pool_size or no active connections in pool makes a new one
        """
        if getattr(self, 'connection', None) != None:
            return
        if f is None:
            f = self.connector

        if not self.pool_size:
            self.connection = f()
            self.cursor = cursor and self.connection.cursor()
        else:
            uri = self.uri
            POOLS = ConnectionPool.POOLS
            while True:
                GLOBAL_LOCKER.acquire()
                if not uri in POOLS:
                    POOLS[uri] = []
                if POOLS[uri]:
                    # try a pooled connection; loop again if it is dead
                    self.connection = POOLS[uri].pop()
                    GLOBAL_LOCKER.release()
                    self.cursor = cursor and self.connection.cursor()
                    try:
                        if self.cursor and self.check_active_connection:
                            self.execute('SELECT 1;')
                        break
                    except:
                        pass
                else:
                    # pool empty: create a brand new connection
                    GLOBAL_LOCKER.release()
                    self.connection = f()
                    self.cursor = cursor and self.connection.cursor()
                    break
        self.after_connection_hook()
###################################################################################
# this is a generic adapter that does nothing; all others are derived from this one
###################################################################################

class BaseAdapter(ConnectionPool):
    """Generic no-op DAL adapter; concrete adapters subclass it and
    override the capability flags and the ``types`` SQL map below."""
    # capability flags / shared defaults
    native_json = False
    driver = None
    driver_name = None
    drivers = () # list of drivers from which to pick
    connection = None
    maxcharlength = MAXCHARLENGTH
    commit_on_alter_table = False
    support_distributed_transaction = False
    uploads_in_blob = False
    can_select_for_update = True

    # literals used to store boolean values in CHAR(1) columns
    TRUE = 'T'
    FALSE = 'F'
    # DAL field type -> SQL column type; %(length)s, %(foreign_key)s etc.
    # are filled in per-field when the CREATE TABLE statement is built
    types = {
        'boolean': 'CHAR(1)',
        'string': 'CHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'CHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'CHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'INTEGER',
        'float':'DOUBLE',
        'double': 'DOUBLE',
        'decimal': 'DOUBLE',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER PRIMARY KEY AUTOINCREMENT',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        # the two below are only used when DAL(...bigint_id=True) and replace 'id','reference'
        'big-id': 'BIGINT PRIMARY KEY AUTOINCREMENT',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }
def id_query(self, table):
    """Return a Query matching every row of *table*.

    NOTE: ``!= None`` is intentional here — Field.__ne__ is overloaded to
    build a DAL Query object, so this must NOT be rewritten as
    ``is not None`` (which would yield a plain boolean).
    """
    every_row = table._id != None
    return every_row
def adapt(self, obj):
    """Quote a string value for SQL, doubling embedded single quotes."""
    escaped = obj.replace("'", "''")
    return "'" + escaped + "'"
def smart_adapt(self, obj):
    """Render *obj* as a SQL literal: numbers stay bare, everything else
    is stringified and quoted via self.adapt()."""
    is_number = isinstance(obj, (int, float))
    return str(obj) if is_number else self.adapt(str(obj))
def integrity_error(self):
    """Return the active driver's IntegrityError exception class."""
    return getattr(self.driver, 'IntegrityError')
def operational_error(self):
    """Return the active driver's OperationalError exception class."""
    return getattr(self.driver, 'OperationalError')
def file_exists(self, filename):
    """
    to be used ONLY for files that on GAE may not be on filesystem
    """
    return os.path.exists(filename)
def file_open(self, filename, mode='rb', lock=True):
    """
    to be used ONLY for files that on GAE may not be on filesystem
    """
    # prefer a portalocker-locked file when the module is available
    use_lock = have_portalocker and lock
    if use_lock:
        fileobj = portalocker.LockedFile(filename, mode)
    else:
        fileobj = open(filename, mode)
    return fileobj
def file_close(self, fileobj):
    """
    to be used ONLY for files that on GAE may not be on filesystem
    """
    if not fileobj:
        return
    fileobj.close()
def file_delete(self, filename):
    """Remove *filename* from the filesystem."""
    os.remove(filename)
def find_driver(self, adapter_args, uri=None):
    """Select and bind a driver module for this adapter.

    Preference order: driver requested in the URI scheme
    ('dbtype:driver://...'), then adapter_args['driver'], then the first
    importable entry of self.drivers. Raises RuntimeError when the
    requested driver (or any driver at all) is unavailable.
    """
    if getattr(self, 'driver', None) != None:
        return  # already bound
    available = [d for d in self.drivers if d in globals()]
    if uri:
        scheme_parts = uri.split('://', 1)[0].split(':')
        requested = scheme_parts[1] if len(scheme_parts) > 1 else None
    else:
        requested = None
    requested = requested or adapter_args.get('driver')
    if requested:
        if requested not in available:
            raise RuntimeError("driver %s not available" % requested)
        self.driver_name = requested
        self.driver = globals().get(requested)
    elif available:
        self.driver_name = available[0]
        self.driver = globals().get(self.driver_name)
    else:
        raise RuntimeError("no driver available %s" % str(self.drivers))
def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
             credential_decoder=IDENTITY, driver_args={},
             adapter_args={}, do_connect=True, after_connection=None):
    """Base adapter constructor: records connection settings and installs
    placeholder connection/cursor objects (any method call returns [])."""
    self.db = db
    self.dbengine = "None"
    self.uri = uri
    self.pool_size = pool_size
    self.folder = folder
    self.db_codec = db_codec
    self._after_connection = after_connection

    class _NullConnection(object):
        # stand-in for a real DBAPI connection/cursor
        lastrowid = 1
        def __getattr__(self, attr):
            return lambda *a, **b: []

    self.connection = _NullConnection()
    self.cursor = _NullConnection()
def sequence_name(self, tablename):
    """Default name of the sequence backing *tablename*'s id column."""
    return '{0}_sequence'.format(tablename)
def trigger_name(self, tablename):
    """Default trigger name; uses the same '<table>_sequence' pattern as
    sequence_name (intentional in the base adapter)."""
    return '{0}_sequence'.format(tablename)
def varquote(self, name):
    """Base adapter performs no identifier quoting; dialect-specific
    subclasses override this."""
    return name
def create_table(self, table,
                 migrate=True,
                 fake_migrate=False,
                 polymodel=None):
    """Build the CREATE TABLE statement for *table* and drive migrations.

    Returns the SQL string. When ``migrate`` is falsy the SQL is returned
    without touching the database; otherwise migration metadata is written
    to/compared with the ``<urihash>_<table>.table`` pickle file and
    migrate_table() is invoked when the stored field definitions differ.
    ``migrate`` may also be a string naming the metadata file.
    ``fake_migrate`` records the migration without executing SQL.
    ``polymodel`` is accepted for GAE compatibility and unused here.
    NOTE(review): the ``.decode('utf8').encode(...)`` path handling below
    is Python-2-only byte-string logic.
    """
    db = table._db
    fields = []
    # PostGIS geo fields are added after the table has been created
    postcreation_fields = []
    sql_fields = {}       # per-field metadata used to detect migrations
    sql_fields_aux = {}   # per-field SQL used for the actual CREATE
    TFK = {}              # table-level foreign keys (multicolumn PKs)
    tablename = table._tablename
    sortable = 0
    types = self.types
    for field in table:
        sortable += 1
        field_name = field.name
        field_type = field.type
        if isinstance(field_type, SQLCustomType):
            ftype = field_type.native or field_type.type
        elif field_type.startswith('reference'):
            referenced = field_type[10:].strip()
            if referenced == '.':
                # 'reference .' means self-reference
                referenced = tablename
            constraint_name = self.constraint_name(tablename, field_name)
            if not '.' in referenced \
                    and referenced != tablename \
                    and hasattr(table, '_primarykey'):
                ftype = types['integer']
            else:
                if hasattr(table, '_primarykey'):
                    rtablename, rfieldname = referenced.split('.')
                    rtable = db[rtablename]
                    rfield = rtable[rfieldname]
                    # must be PK reference or unique
                    if rfieldname in rtable._primarykey or \
                            rfield.unique:
                        ftype = types[rfield.type[:9]] % \
                            dict(length=rfield.length)
                        # multicolumn primary key reference?
                        if not rfield.unique and len(rtable._primarykey) > 1:
                            # then it has to be a table level FK
                            if rtablename not in TFK:
                                TFK[rtablename] = {}
                            TFK[rtablename][rfieldname] = field_name
                        else:
                            ftype = ftype + \
                                types['reference FK'] % dict(
                                    constraint_name=constraint_name,  # should be quoted
                                    foreign_key='%s (%s)' % (rtablename,
                                                             rfieldname),
                                    table_name=tablename,
                                    field_name=field_name,
                                    on_delete_action=field.ondelete)
                else:
                    # make a guess here for circular references
                    if referenced in db:
                        id_fieldname = db[referenced]._id.name
                    elif referenced == tablename:
                        id_fieldname = table._id.name
                    else:  # make a guess
                        id_fieldname = 'id'
                    ftype = types[field_type[:9]] % dict(
                        index_name=field_name + '__idx',
                        field_name=field_name,
                        constraint_name=constraint_name,
                        foreign_key='%s (%s)' % (referenced,
                                                 id_fieldname),
                        on_delete_action=field.ondelete)
        elif field_type.startswith('list:reference'):
            ftype = types[field_type[:14]]
        elif field_type.startswith('decimal'):
            # e.g. 'decimal(10,2)' -> precision=10, scale=2
            precision, scale = map(int, field_type[8:-1].split(','))
            ftype = types[field_type[:7]] % \
                dict(precision=precision, scale=scale)
        elif field_type.startswith('geo'):
            if not hasattr(self, 'srid'):
                raise RuntimeError('Adapter does not support geometry')
            srid = self.srid
            geotype, parms = field_type[:-1].split('(')
            if not geotype in types:
                raise SyntaxError(
                    'Field: unknown field type: %s for %s' \
                    % (field_type, field_name))
            ftype = types[geotype]
            if self.dbengine == 'postgres' and geotype == 'geometry':
                # parameters: schema, srid, dimension
                dimension = 2  # GIS.dimension ???
                parms = parms.split(',')
                if len(parms) == 3:
                    schema, srid, dimension = parms
                elif len(parms) == 2:
                    schema, srid = parms
                else:
                    schema = parms[0]
                ftype = "SELECT AddGeometryColumn ('%%(schema)s', '%%(tablename)s', '%%(fieldname)s', %%(srid)s, '%s', %%(dimension)s);" % types[geotype]
                ftype = ftype % dict(schema=schema,
                                     tablename=tablename,
                                     fieldname=field_name, srid=srid,
                                     dimension=dimension)
                postcreation_fields.append(ftype)
        elif not field_type in types:
            raise SyntaxError('Field: unknown field type: %s for %s' % \
                (field_type, field_name))
        else:
            ftype = types[field_type]\
                % dict(length=field.length)
        if not field_type.startswith('id') and \
                not field_type.startswith('reference'):
            if field.notnull:
                ftype += ' NOT NULL'
            else:
                ftype += self.ALLOW_NULL()
            if field.unique:
                ftype += ' UNIQUE'
            if field.custom_qualifier:
                ftype += ' %s' % field.custom_qualifier

        # add to list of fields
        sql_fields[field_name] = dict(
            length=field.length,
            unique=field.unique,
            notnull=field.notnull,
            sortable=sortable,
            type=str(field_type),
            sql=ftype)

        if field.notnull and not field.default is None:
            # Caveat: sql_fields and sql_fields_aux
            # differ for default values.
            # sql_fields is used to trigger migrations and sql_fields_aux
            # is used for create tables.
            # The reason is that we do not want to trigger
            # a migration simply because a default value changes.
            not_null = self.NOT_NULL(field.default, field_type)
            ftype = ftype.replace('NOT NULL', not_null)
        sql_fields_aux[field_name] = dict(sql=ftype)
        # Postgres - PostGIS:
        # geometry fields are added after the table has been created, not now
        if not (self.dbengine == 'postgres' and \
                field_type.startswith('geom')):
            fields.append('%s %s' % (field_name, ftype))
    other = ';'

    # backend-specific extensions to fields
    if self.dbengine == 'mysql':
        if not hasattr(table, "_primarykey"):
            fields.append('PRIMARY KEY(%s)' % table._id.name)
        other = ' ENGINE=InnoDB CHARACTER SET utf8;'

    fields = ',\n    '.join(fields)
    for rtablename in TFK:
        rfields = TFK[rtablename]
        pkeys = db[rtablename]._primarykey
        fkeys = [rfields[k] for k in pkeys]
        # NOTE(review): `field` here is the last field of the loop above —
        # looks like it relies on all TFK fields sharing one ondelete;
        # preserved as-is, confirm against upstream before changing.
        fields = fields + ',\n    ' + \
            types['reference TFK'] % dict(
                table_name=tablename,
                field_name=', '.join(fkeys),
                foreign_table=rtablename,
                foreign_key=', '.join(pkeys),
                on_delete_action=field.ondelete)

    if hasattr(table, '_primarykey'):
        query = "CREATE TABLE %s(\n    %s,\n    %s) %s" % \
            (tablename, fields,
             self.PRIMARY_KEY(', '.join(table._primarykey)), other)
    else:
        query = "CREATE TABLE %s(\n    %s\n)%s" % \
            (tablename, fields, other)

    if self.uri.startswith('sqlite:///') \
            or self.uri.startswith('spatialite:///'):
        # file-backed sqlite: metadata lives next to the database file
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        dbpath = self.uri[9:self.uri.rfind('/')]\
            .decode('utf8').encode(path_encoding)
    else:
        dbpath = self.folder

    if not migrate:
        return query
    elif self.uri.startswith('sqlite:memory')\
            or self.uri.startswith('spatialite:memory'):
        # in-memory databases have no migration metadata file
        table._dbt = None
    elif isinstance(migrate, str):
        table._dbt = pjoin(dbpath, migrate)
    else:
        table._dbt = pjoin(
            dbpath, '%s_%s.table' % (table._db._uri_hash, tablename))

    if table._dbt:
        table._loggername = pjoin(dbpath, 'sql.log')
        logfile = self.file_open(table._loggername, 'a')
    else:
        logfile = None
    if not table._dbt or not self.file_exists(table._dbt):
        # first creation (or no metadata): execute and record
        if table._dbt:
            logfile.write('timestamp: %s\n'
                          % datetime.datetime.today().isoformat())
            logfile.write(query + '\n')
        if not fake_migrate:
            self.create_sequence_and_triggers(query, table)
            table._db.commit()
            # Postgres geom fields are added now,
            # after the table has been created
            for query in postcreation_fields:
                self.execute(query)
                table._db.commit()
        if table._dbt:
            tfile = self.file_open(table._dbt, 'w')
            pickle.dump(sql_fields, tfile)
            self.file_close(tfile)
            if fake_migrate:
                logfile.write('faked!\n')
            else:
                logfile.write('success!\n')
    else:
        # metadata exists: compare and migrate if the schema changed
        tfile = self.file_open(table._dbt, 'r')
        try:
            sql_fields_old = pickle.load(tfile)
        except EOFError:
            self.file_close(tfile)
            self.file_close(logfile)
            raise RuntimeError('File %s appears corrupted' % table._dbt)
        self.file_close(tfile)
        if sql_fields != sql_fields_old:
            self.migrate_table(table,
                               sql_fields, sql_fields_old,
                               sql_fields_aux, logfile,
                               fake_migrate=fake_migrate)
    self.file_close(logfile)
    return query
    def migrate_table(
        self,
        table,
        sql_fields,
        sql_fields_old,
        sql_fields_aux,
        logfile,
        fake_migrate=False,
        ):
        """Diff the current field definitions against the pickled metadata
        and emit/execute the ALTER TABLE statements needed to reconcile them.

        table          -- DAL Table being migrated
        sql_fields     -- new metadata: {fieldname: dict(sql=..., type=...)}
        sql_fields_old -- metadata loaded from the .table pickle file
        sql_fields_aux -- like sql_fields but with NOT NULL defaults resolved
        logfile        -- open sql.log file handle (caller closes it)
        fake_migrate   -- log the statements but do not execute them
        """
        db = table._db
        db._migrated.append(table._tablename)
        tablename = table._tablename
        def fix(item):
            # Old .table files stored a bare SQL string per field; wrap it
            # in the newer dict form. NOTE: 'unkown' (sic) is the historical
            # marker value persisted in .table files -- do not "fix" the
            # spelling, it is a runtime string.
            k,v=item
            if not isinstance(v,dict):
                v=dict(type='unkown',sql=v)
            return k.lower(),v
        # make sure all field names are lower case to avoid
        # migrations because of case change
        sql_fields = dict(map(fix,sql_fields.iteritems()))
        sql_fields_old = dict(map(fix,sql_fields_old.iteritems()))
        sql_fields_aux = dict(map(fix,sql_fields_aux.iteritems()))
        if db._debug:
            logging.debug('migrating %s to %s' % (sql_fields_old,sql_fields))

        # union of new and old field names, preserving new-field order
        keys = sql_fields.keys()
        for key in sql_fields_old:
            if not key in keys:
                keys.append(key)
        new_add = self.concat_add(tablename)

        metadata_change = False
        sql_fields_current = copy.copy(sql_fields_old)
        for key in keys:
            query = None
            if not key in sql_fields_old:
                # field added
                sql_fields_current[key] = sql_fields[key]
                if self.dbengine in ('postgres',) and \
                   sql_fields[key]['type'].startswith('geometry'):
                    # 'sql' == ftype in sql
                    query = [ sql_fields[key]['sql'] ]
                else:
                    query = ['ALTER TABLE %s ADD %s %s;' % \
                             (tablename, key,
                              sql_fields_aux[key]['sql'].replace(', ', new_add))]
                metadata_change = True
            elif self.dbengine in ('sqlite', 'spatialite'):
                # sqlite cannot ALTER columns; only the metadata is updated
                if key in sql_fields:
                    sql_fields_current[key] = sql_fields[key]
                metadata_change = True
            elif not key in sql_fields:
                # field removed
                del sql_fields_current[key]
                ftype = sql_fields_old[key]['type']
                if self.dbengine in ('postgres',) \
                   and ftype.startswith('geometry'):
                    geotype, parms = ftype[:-1].split('(')
                    schema = parms.split(',')[0]
                    query = [ "SELECT DropGeometryColumn ('%(schema)s', '%(table)s', '%(field)s');" % dict(schema=schema, table=tablename, field=key,) ]
                elif not self.dbengine in ('firebird',):
                    query = ['ALTER TABLE %s DROP COLUMN %s;'
                             % (tablename, key)]
                else:
                    query = ['ALTER TABLE %s DROP %s;' % (tablename, key)]
                metadata_change = True
            elif sql_fields[key]['sql'] != sql_fields_old[key]['sql'] \
                  and not (key in table.fields and
                           isinstance(table[key].type, SQLCustomType)) \
                  and not sql_fields[key]['type'].startswith('reference')\
                  and not sql_fields[key]['type'].startswith('double')\
                  and not sql_fields[key]['type'].startswith('id'):
                # field type changed: copy data through a __tmp column
                sql_fields_current[key] = sql_fields[key]
                t = tablename
                tt = sql_fields_aux[key]['sql'].replace(', ', new_add)
                if not self.dbengine in ('firebird',):
                    query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt),
                             'UPDATE %s SET %s__tmp=%s;' % (t, key, key),
                             'ALTER TABLE %s DROP COLUMN %s;' % (t, key),
                             'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
                             'UPDATE %s SET %s=%s__tmp;' % (t, key, key),
                             'ALTER TABLE %s DROP COLUMN %s__tmp;' % (t, key)]
                else:
                    # firebird uses DROP instead of DROP COLUMN
                    query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt),
                             'UPDATE %s SET %s__tmp=%s;' % (t, key, key),
                             'ALTER TABLE %s DROP %s;' % (t, key),
                             'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
                             'UPDATE %s SET %s=%s__tmp;' % (t, key, key),
                             'ALTER TABLE %s DROP %s__tmp;' % (t, key)]
                metadata_change = True
            elif sql_fields[key]['type'] != sql_fields_old[key]['type']:
                # only the logical type changed; no SQL needed
                sql_fields_current[key] = sql_fields[key]
                metadata_change = True

            if query:
                logfile.write('timestamp: %s\n'
                              % datetime.datetime.today().isoformat())
                db['_lastsql'] = '\n'.join(query)
                for sub_query in query:
                    logfile.write(sub_query + '\n')
                    if not fake_migrate:
                        self.execute(sub_query)
                        # Caveat: mysql, oracle and firebird do not allow multiple alter table
                        # in one transaction so we must commit partial transactions and
                        # update table._dbt after alter table.
                        if db._adapter.commit_on_alter_table:
                            db.commit()
                            tfile = self.file_open(table._dbt, 'w')
                            pickle.dump(sql_fields_current, tfile)
                            self.file_close(tfile)
                            logfile.write('success!\n')
                    else:
                        logfile.write('faked!\n')
            elif metadata_change:
                tfile = self.file_open(table._dbt, 'w')
                pickle.dump(sql_fields_current, tfile)
                self.file_close(tfile)

        if metadata_change and \
           not (query and self.dbengine in ('mysql','oracle','firebird')):
            db.commit()
            tfile = self.file_open(table._dbt, 'w')
            pickle.dump(sql_fields_current, tfile)
            self.file_close(tfile)
1112 - def LOWER(self, first):
1113 return 'LOWER(%s)' % self.expand(first)
1114
1115 - def UPPER(self, first):
1116 return 'UPPER(%s)' % self.expand(first)
1117
1118 - def COUNT(self, first, distinct=None):
1119 return ('COUNT(%s)' if not distinct else 'COUNT(DISTINCT %s)') \ 1120 % self.expand(first)
1121
1122 - def EXTRACT(self, first, what):
1123 return "EXTRACT(%s FROM %s)" % (what, self.expand(first))
1124
1125 - def EPOCH(self, first):
1126 return self.EXTRACT(first, 'epoch')
1127
1128 - def AGGREGATE(self, first, what):
1129 return "%s(%s)" % (what, self.expand(first))
1130
1131 - def JOIN(self):
1132 return 'JOIN'
1133
1134 - def LEFT_JOIN(self):
1135 return 'LEFT JOIN'
1136
1137 - def RANDOM(self):
1138 return 'Random()'
1139
1140 - def NOT_NULL(self, default, field_type):
1141 return 'NOT NULL DEFAULT %s' % self.represent(default,field_type)
1142
1143 - def COALESCE(self, first, second):
1144 expressions = [self.expand(first)]+[self.expand(e) for e in second] 1145 return 'COALESCE(%s)' % ','.join(expressions)
1146
1147 - def COALESCE_ZERO(self, first):
1148 return 'COALESCE(%s,0)' % self.expand(first)
1149
1150 - def RAW(self, first):
1151 return first
1152
1153 - def ALLOW_NULL(self):
1154 return ''
1155
1156 - def SUBSTRING(self, field, parameters):
1157 return 'SUBSTR(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
1158
1159 - def PRIMARY_KEY(self, key):
1160 return 'PRIMARY KEY(%s)' % key
1161
1162 - def _drop(self, table, mode):
1163 return ['DROP TABLE %s;' % table]
1164
1165 - def drop(self, table, mode=''):
1166 db = table._db 1167 if table._dbt: 1168 logfile = self.file_open(table._loggername, 'a') 1169 queries = self._drop(table, mode) 1170 for query in queries: 1171 if table._dbt: 1172 logfile.write(query + '\n') 1173 self.execute(query) 1174 db.commit() 1175 del db[table._tablename] 1176 del db.tables[db.tables.index(table._tablename)] 1177 db._remove_references_to(table) 1178 if table._dbt: 1179 self.file_delete(table._dbt) 1180 logfile.write('success!\n')
1181
1182 - def _insert(self, table, fields):
1183 if fields: 1184 keys = ','.join(f.name for f, v in fields) 1185 values = ','.join(self.expand(v, f.type) for f, v in fields) 1186 return 'INSERT INTO %s(%s) VALUES (%s);' % (table, keys, values) 1187 else: 1188 return self._insert_empty(table)
1189
1190 - def _insert_empty(self, table):
1191 return 'INSERT INTO %s DEFAULT VALUES;' % table
1192
1193 - def insert(self, table, fields):
1194 query = self._insert(table,fields) 1195 try: 1196 self.execute(query) 1197 except Exception: 1198 e = sys.exc_info()[1] 1199 if isinstance(e,self.integrity_error_class()): 1200 return None 1201 raise e 1202 if hasattr(table,'_primarykey'): 1203 return dict([(k[0].name, k[1]) for k in fields \ 1204 if k[0].name in table._primarykey]) 1205 id = self.lastrowid(table) 1206 if not isinstance(id,int): 1207 return id 1208 rid = Reference(id) 1209 (rid._table, rid._record) = (table, None) 1210 return rid
1211
1212 - def bulk_insert(self, table, items):
1213 return [self.insert(table,item) for item in items]
1214
1215 - def NOT(self, first):
1216 return '(NOT %s)' % self.expand(first)
1217
1218 - def AND(self, first, second):
1219 return '(%s AND %s)' % (self.expand(first), self.expand(second))
1220
1221 - def OR(self, first, second):
1222 return '(%s OR %s)' % (self.expand(first), self.expand(second))
1223
1224 - def BELONGS(self, first, second):
1225 if isinstance(second, str): 1226 return '(%s IN (%s))' % (self.expand(first), second[:-1]) 1227 elif not second: 1228 return '(1=0)' 1229 items = ','.join(self.expand(item, first.type) for item in second) 1230 return '(%s IN (%s))' % (self.expand(first), items)
1231
1232 - def REGEXP(self, first, second):
1233 "regular expression operator" 1234 raise NotImplementedError
1235
1236 - def LIKE(self, first, second):
1237 "case sensitive like operator" 1238 raise NotImplementedError
1239
1240 - def ILIKE(self, first, second):
1241 "case in-sensitive like operator" 1242 return '(%s LIKE %s)' % (self.expand(first), 1243 self.expand(second, 'string'))
1244
1245 - def STARTSWITH(self, first, second):
1246 return '(%s LIKE %s)' % (self.expand(first), 1247 self.expand(second+'%', 'string'))
1248
1249 - def ENDSWITH(self, first, second):
1250 return '(%s LIKE %s)' % (self.expand(first), 1251 self.expand('%'+second, 'string'))
1252
    def CONTAINS(self, first, second, case_sensitive=False):
        """Containment test for string/text/json and list: fields.

        For an Expression *second*, uses INSTR; for a literal, builds a
        LIKE pattern. list: fields are stored bar-encoded ('|v1|v2|'), so
        the needle is wrapped in '|' delimiters. '%' and '|' in the needle
        are escaped to avoid acting as wildcards/separators.
        """
        if isinstance(second,Expression):
            field = self.expand(first)
            expr = self.expand(second,'string')
            if first.type.startswith('list:'):
                expr = 'CONCAT("|", %s, "|")' % expr
            elif not first.type in ('string', 'text', 'json'):
                raise RuntimeError("Expression Not Supported")
            return 'INSTR(%s,%s)' % (field, expr)
        else:
            if first.type in ('string', 'text', 'json'):
                # escape literal '%' so it is not a LIKE wildcard
                key = '%'+str(second).replace('%','%%')+'%'
            elif first.type.startswith('list:'):
                key = '%|'+str(second).replace('|','||').replace('%','%%')+'|%'
            else:
                raise RuntimeError("Expression Not Supported")
            # case_sensitive picks LIKE, otherwise ILIKE
            op = case_sensitive and self.LIKE or self.ILIKE
            return op(first,key)
1271
1272 - def EQ(self, first, second=None):
1273 if second is None: 1274 return '(%s IS NULL)' % self.expand(first) 1275 return '(%s = %s)' % (self.expand(first), 1276 self.expand(second, first.type))
1277
1278 - def NE(self, first, second=None):
1279 if second is None: 1280 return '(%s IS NOT NULL)' % self.expand(first) 1281 return '(%s <> %s)' % (self.expand(first), 1282 self.expand(second, first.type))
1283
1284 - def LT(self,first,second=None):
1285 if second is None: 1286 raise RuntimeError("Cannot compare %s < None" % first) 1287 return '(%s < %s)' % (self.expand(first), 1288 self.expand(second,first.type))
1289
1290 - def LE(self,first,second=None):
1291 if second is None: 1292 raise RuntimeError("Cannot compare %s <= None" % first) 1293 return '(%s <= %s)' % (self.expand(first), 1294 self.expand(second,first.type))
1295
1296 - def GT(self,first,second=None):
1297 if second is None: 1298 raise RuntimeError("Cannot compare %s > None" % first) 1299 return '(%s > %s)' % (self.expand(first), 1300 self.expand(second,first.type))
1301
1302 - def GE(self,first,second=None):
1303 if second is None: 1304 raise RuntimeError("Cannot compare %s >= None" % first) 1305 return '(%s >= %s)' % (self.expand(first), 1306 self.expand(second,first.type))
1307
1308 - def ADD(self, first, second):
1309 return '(%s + %s)' % (self.expand(first), 1310 self.expand(second, first.type))
1311
1312 - def SUB(self, first, second):
1313 return '(%s - %s)' % (self.expand(first), 1314 self.expand(second, first.type))
1315
1316 - def MUL(self, first, second):
1317 return '(%s * %s)' % (self.expand(first), 1318 self.expand(second, first.type))
1319
1320 - def DIV(self, first, second):
1321 return '(%s / %s)' % (self.expand(first), 1322 self.expand(second, first.type))
1323
1324 - def MOD(self, first, second):
1325 return '(%s %% %s)' % (self.expand(first), 1326 self.expand(second, first.type))
1327
1328 - def AS(self, first, second):
1329 return '%s AS %s' % (self.expand(first), second)
1330
1331 - def ON(self, first, second):
1332 if use_common_filters(second): 1333 second = self.common_filter(second,[first._tablename]) 1334 return '%s ON %s' % (self.expand(first), self.expand(second))
1335
1336 - def INVERT(self, first):
1337 return '%s DESC' % self.expand(first)
1338
1339 - def COMMA(self, first, second):
1340 return '%s, %s' % (self.expand(first), self.expand(second))
1341
    def expand(self, expression, field_type=None):
        """Recursively render a DAL expression tree as SQL text.

        Fields become 'table.field'; Expression/Query nodes dispatch to
        their operator method; raw strings pass through (wrapped in
        parentheses, trailing ';' stripped); plain Python values are
        converted via represent() when a field_type is known.
        """
        if isinstance(expression, Field):
            return '%s.%s' % (expression.tablename, expression.name)
        elif isinstance(expression, (Expression, Query)):
            first = expression.first
            second = expression.second
            op = expression.op
            optional_args = expression.optional_args or {}
            # dispatch on arity: binary op, unary op, raw SQL string, or
            # a zero-argument operator method
            if not second is None:
                return op(first, second, **optional_args)
            elif not first is None:
                return op(first,**optional_args)
            elif isinstance(op, str):
                if op.endswith(';'):
                    op=op[:-1]
                return '(%s)' % op
            else:
                return op()
        elif field_type:
            # plain value with a known type: render as SQL literal
            return str(self.represent(expression,field_type))
        elif isinstance(expression,(list,tuple)):
            return ','.join(self.represent(item,field_type) \
                                for item in expression)
        elif isinstance(expression, bool):
            return '1' if expression else '0'
        else:
            return str(expression)
1369
1370 - def alias(self, table, alias):
1371 """ 1372 Given a table object, makes a new table object 1373 with alias name. 1374 """ 1375 other = copy.copy(table) 1376 other['_ot'] = other._tablename 1377 other['ALL'] = SQLALL(other) 1378 other['_tablename'] = alias 1379 for fieldname in other.fields: 1380 other[fieldname] = copy.copy(other[fieldname]) 1381 other[fieldname]._tablename = alias 1382 other[fieldname].tablename = alias 1383 other[fieldname].table = other 1384 table._db[alias] = other 1385 return other
1386
1387 - def _truncate(self, table, mode=''):
1388 tablename = table._tablename 1389 return ['TRUNCATE TABLE %s %s;' % (tablename, mode or '')]
1390
1391 - def truncate(self, table, mode= ' '):
1392 # Prepare functions "write_to_logfile" and "close_logfile" 1393 if table._dbt: 1394 logfile = self.file_open(table._loggername, 'a') 1395 else: 1396 class Logfile(object): 1397 def write(self, value): 1398 pass
1399 def close(self): 1400 pass 1401 logfile = Logfile() 1402 1403 try: 1404 queries = table._db._adapter._truncate(table, mode) 1405 for query in queries: 1406 logfile.write(query + '\n') 1407 self.execute(query) 1408 table._db.commit() 1409 logfile.write('success!\n') 1410 finally: 1411 logfile.close() 1412
1413 - def _update(self, tablename, query, fields):
1414 if query: 1415 if use_common_filters(query): 1416 query = self.common_filter(query, [tablename]) 1417 sql_w = ' WHERE ' + self.expand(query) 1418 else: 1419 sql_w = '' 1420 sql_v = ','.join(['%s=%s' % (field.name, 1421 self.expand(value, field.type)) \ 1422 for (field, value) in fields]) 1423 return 'UPDATE %s SET %s%s;' % (tablename, sql_v, sql_w)
1424
1425 - def update(self, tablename, query, fields):
1426 sql = self._update(tablename, query, fields) 1427 self.execute(sql) 1428 try: 1429 return self.cursor.rowcount 1430 except: 1431 return None
1432
1433 - def _delete(self, tablename, query):
1434 if query: 1435 if use_common_filters(query): 1436 query = self.common_filter(query, [tablename]) 1437 sql_w = ' WHERE ' + self.expand(query) 1438 else: 1439 sql_w = '' 1440 return 'DELETE FROM %s%s;' % (tablename, sql_w)
1441
    def delete(self, tablename, query):
        """Execute a DELETE and return the affected row count (None if the
        driver lacks rowcount).

        SQLite/SpatiaLite do not enforce ON DELETE CASCADE here, so the
        ids to be deleted are snapshotted first and referencing rows with
        ondelete=='CASCADE' are deleted recursively afterwards.
        """
        sql = self._delete(tablename, query)
        ### special code to handle CASCADE in SQLite & SpatiaLite
        db = self.db
        table = db[tablename]
        if self.dbengine in ('sqlite', 'spatialite') and table._referenced_by:
            # snapshot ids before they disappear
            deleted = [x[table._id.name] for x in db(query).select(table._id)]
        ### end special code to handle CASCADE in SQLite & SpatiaLite
        self.execute(sql)
        try:
            counter = self.cursor.rowcount
        except:
            counter = None
        ### special code to handle CASCADE in SQLite & SpatiaLite
        if self.dbengine in ('sqlite', 'spatialite') and counter:
            for field in table._referenced_by:
                if field.type=='reference '+table._tablename \
                        and field.ondelete=='CASCADE':
                    db(field.belongs(deleted)).delete()
        ### end special code to handle CASCADE in SQLite & SpatiaLite
        return counter
1463
1464 - def get_table(self, query):
1465 tablenames = self.tables(query) 1466 if len(tablenames)==1: 1467 return tablenames[0] 1468 elif len(tablenames)<1: 1469 raise RuntimeError("No table selected") 1470 else: 1471 raise RuntimeError("Too many tables selected")
1472
    def expand_all(self, fields, tablenames):
        """Normalize a select field list into Field/Expression objects.

        SQLALL entries expand to every field of their table; 'table.field'
        strings are resolved to Field objects; other strings become raw
        Expressions. An empty list means 'all fields of all tables'.
        """
        db = self.db
        new_fields = []
        append = new_fields.append
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item._table
            elif isinstance(item,str):
                if REGEX_TABLE_DOT_FIELD.match(item):
                    tablename,fieldname = item.split('.')
                    append(db[tablename][fieldname])
                else:
                    # raw SQL snippet; default arg binds item at def time
                    append(Expression(db,lambda item=item:item))
            else:
                append(item)
        # ## if no fields specified take them all from the requested tables
        if not new_fields:
            for table in tablenames:
                for field in db[table]:
                    append(field)
        return new_fields
1494
    def _select(self, query, fields, attributes):
        """Build the full SELECT SQL string for *query* and *fields*.

        Supported attributes: left, join, distinct, groupby, orderby,
        having, limitby, for_update (validated against SELECT_ARGS).
        Handles inner joins, left joins, and the mixed case; geometry
        fields are wrapped with st_astext() for textual output.
        """
        tables = self.tables
        for key in set(attributes.keys())-SELECT_ARGS:
            raise SyntaxError('invalid select attribute: %s' % key)
        args_get = attributes.get
        tablenames = tables(query)
        # collect tables referenced by the selected fields too
        for field in fields:
            if isinstance(field, basestring) \
                    and REGEX_TABLE_DOT_FIELD.match(field):
                tn,fn = field.split('.')
                field = self.db[tn][fn]
            for tablename in tables(field):
                if not tablename in tablenames:
                    tablenames.append(tablename)

        if len(tablenames) < 1:
            raise SyntaxError('Set: no tables selected')
        self._colnames = map(self.expand, fields)
        def geoexpand(field):
            # geometry columns are selected as text
            if isinstance(field.type,str) and field.type.startswith('geometry'):
                field = field.st_astext()
            return self.expand(field)
        sql_f = ', '.join(map(geoexpand, fields))
        sql_o = ''
        sql_s = ''
        left = args_get('left', False)
        inner_join = args_get('join', False)
        distinct = args_get('distinct', False)
        groupby = args_get('groupby', False)
        orderby = args_get('orderby', False)
        having = args_get('having', False)
        limitby = args_get('limitby', False)
        for_update = args_get('for_update', False)
        if self.can_select_for_update is False and for_update is True:
            raise SyntaxError('invalid select attribute: for_update')
        if distinct is True:
            sql_s += 'DISTINCT'
        elif distinct:
            # e.g. postgres DISTINCT ON (expr)
            sql_s += 'DISTINCT ON (%s)' % distinct
        if inner_join:
            icommand = self.JOIN()
            if not isinstance(inner_join, (tuple, list)):
                inner_join = [inner_join]
            # plain tables vs ON-expressions in the join list
            ijoint = [t._tablename for t in inner_join
                      if not isinstance(t,Expression)]
            ijoinon = [t for t in inner_join if isinstance(t, Expression)]
            itables_to_merge={} #issue 490
            [itables_to_merge.update(
                    dict.fromkeys(tables(t))) for t in ijoinon]
            ijoinont = [t.first._tablename for t in ijoinon]
            [itables_to_merge.pop(t) for t in ijoinont
             if t in itables_to_merge] #issue 490
            iimportant_tablenames = ijoint + ijoinont + itables_to_merge.keys()
            iexcluded = [t for t in tablenames
                         if not t in iimportant_tablenames]
        if left:
            join = attributes['left']
            command = self.LEFT_JOIN()
            if not isinstance(join, (tuple, list)):
                join = [join]
            joint = [t._tablename for t in join
                     if not isinstance(t, Expression)]
            joinon = [t for t in join if isinstance(t, Expression)]
            #patch join+left patch (solves problem with ordering in left joins)
            tables_to_merge={}
            [tables_to_merge.update(
                    dict.fromkeys(tables(t))) for t in joinon]
            joinont = [t.first._tablename for t in joinon]
            [tables_to_merge.pop(t) for t in joinont if t in tables_to_merge]
            important_tablenames = joint + joinont + tables_to_merge.keys()
            excluded = [t for t in tablenames
                        if not t in important_tablenames ]
        else:
            excluded = tablenames

        if use_common_filters(query):
            query = self.common_filter(query,excluded)
        sql_w = ' WHERE ' + self.expand(query) if query else ''

        def alias(t):
            return str(self.db[t])
        # FROM clause: four cases depending on join/left combinations
        if inner_join and not left:
            sql_t = ', '.join([alias(t) for t in iexcluded + \
                                   itables_to_merge.keys()])
            for t in ijoinon:
                sql_t += ' %s %s' % (icommand, str(t))
        elif not inner_join and left:
            sql_t = ', '.join([alias(t) for t in excluded + \
                                   tables_to_merge.keys()])
            if joint:
                sql_t += ' %s %s' % (command, ','.join([t for t in joint]))
            for t in joinon:
                sql_t += ' %s %s' % (command, str(t))
        elif inner_join and left:
            all_tables_in_query = set(important_tablenames + \
                                      iimportant_tablenames + \
                                      tablenames)
            tables_in_joinon = set(joinont + ijoinont)
            tables_not_in_joinon = \
                all_tables_in_query.difference(tables_in_joinon)
            sql_t = ','.join([alias(t) for t in tables_not_in_joinon])
            for t in ijoinon:
                sql_t += ' %s %s' % (icommand, str(t))
            if joint:
                sql_t += ' %s %s' % (command, ','.join([t for t in joint]))
            for t in joinon:
                sql_t += ' %s %s' % (command, str(t))
        else:
            sql_t = ', '.join(alias(t) for t in tablenames)
        if groupby:
            if isinstance(groupby, (list, tuple)):
                groupby = xorify(groupby)
            sql_o += ' GROUP BY %s' % self.expand(groupby)
            if having:
                sql_o += ' HAVING %s' % attributes['having']
        if orderby:
            if isinstance(orderby, (list, tuple)):
                orderby = xorify(orderby)
            if str(orderby) == '<random>':
                sql_o += ' ORDER BY %s' % self.RANDOM()
            else:
                sql_o += ' ORDER BY %s' % self.expand(orderby)
        if limitby:
            # limitby without orderby: force a deterministic order by pk/id
            if not orderby and tablenames:
                sql_o += ' ORDER BY %s' % ', '.join(['%s.%s'%(t,x) for t in tablenames for x in (hasattr(self.db[t],'_primarykey') and self.db[t]._primarykey or [self.db[t]._id.name])])
        # oracle does not support limitby
        sql = self.select_limitby(sql_s, sql_f, sql_t, sql_w, sql_o, limitby)
        if for_update and self.can_select_for_update is True:
            sql = sql.rstrip(';') + ' FOR UPDATE;'
        return sql
1626 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
1627 if limitby: 1628 (lmin, lmax) = limitby 1629 sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin) 1630 return 'SELECT %s %s FROM %s%s%s;' % \ 1631 (sql_s, sql_f, sql_t, sql_w, sql_o)
1632
1633 - def _fetchall(self):
1634 return self.cursor.fetchall()
1635
    def _select_aux(self,sql,fields,attributes):
        """Execute (or fetch-from-cache) a SELECT and post-process the rows.

        With a 'cache' attribute of (cache_model, time_expire), the raw
        rows are cached under a key derived from the URI and SQL. The rows
        are then sliced per 'limitby' and parsed into a Rows object by the
        'processor' (default self.parse).
        """
        args_get = attributes.get
        cache = args_get('cache',None)
        if not cache:
            self.execute(sql)
            rows = self._fetchall()
        else:
            (cache_model, time_expire) = cache
            key = self.uri + '/' + sql + '/rows'
            # long keys are hashed to stay within cache-key limits
            if len(key)>200: key = hashlib_md5(key).hexdigest()
            def _select_aux2():
                self.execute(sql)
                return self._fetchall()
            rows = cache_model(key,_select_aux2,time_expire)
        if isinstance(rows,tuple):
            rows = list(rows)
        # drop the offset rows for backends that cannot slice in SQL
        limitby = args_get('limitby', None) or (0,)
        rows = self.rowslice(rows,limitby[0],None)
        processor = args_get('processor',self.parse)
        cacheable = args_get('cacheable',False)
        return processor(rows,fields,self._colnames,cacheable=cacheable)
    def select(self, query, fields, attributes):
        """
        Always returns a Rows object, possibly empty.

        When both 'cache' and 'cacheable' attributes are set, the whole
        parsed Rows object is cached (keyed on URI + SQL); otherwise any
        caching of raw rows happens inside _select_aux.
        """
        sql = self._select(query, fields, attributes)
        cache = attributes.get('cache', None)
        if cache and attributes.get('cacheable',False):
            # remove 'cache' so _select_aux does not cache a second time
            del attributes['cache']
            (cache_model, time_expire) = cache
            key = self.uri + '/' + sql
            if len(key)>200: key = hashlib_md5(key).hexdigest()
            args = (sql,fields,attributes)
            return cache_model(
                key,
                lambda self=self,args=args:self._select_aux(*args),
                time_expire)
        else:
            return self._select_aux(sql,fields,attributes)
1676
1677 - def _count(self, query, distinct=None):
1678 tablenames = self.tables(query) 1679 if query: 1680 if use_common_filters(query): 1681 query = self.common_filter(query, tablenames) 1682 sql_w = ' WHERE ' + self.expand(query) 1683 else: 1684 sql_w = '' 1685 sql_t = ','.join(tablenames) 1686 if distinct: 1687 if isinstance(distinct,(list, tuple)): 1688 distinct = xorify(distinct) 1689 sql_d = self.expand(distinct) 1690 return 'SELECT count(DISTINCT %s) FROM %s%s;' % \ 1691 (sql_d, sql_t, sql_w) 1692 return 'SELECT count(*) FROM %s%s;' % (sql_t, sql_w)
1693
1694 - def count(self, query, distinct=None):
1695 self.execute(self._count(query, distinct)) 1696 return self.cursor.fetchone()[0]
1697
1698 - def tables(self, *queries):
1699 tables = set() 1700 for query in queries: 1701 if isinstance(query, Field): 1702 tables.add(query.tablename) 1703 elif isinstance(query, (Expression, Query)): 1704 if not query.first is None: 1705 tables = tables.union(self.tables(query.first)) 1706 if not query.second is None: 1707 tables = tables.union(self.tables(query.second)) 1708 return list(tables)
1709
1710 - def commit(self):
1711 if self.connection: return self.connection.commit()
1712
1713 - def rollback(self):
1714 if self.connection: return self.connection.rollback()
1715
1716 - def close_connection(self):
1717 if self.connection: return self.connection.close()
1718
1719 - def distributed_transaction_begin(self, key):
1720 return
1721
1722 - def prepare(self, key):
1723 if self.connection: self.connection.prepare()
1724
1725 - def commit_prepared(self, key):
1726 if self.connection: self.connection.commit()
1727
1728 - def rollback_prepared(self, key):
1729 if self.connection: self.connection.rollback()
1730
1731 - def concat_add(self, tablename):
1732 return ', ADD '
1733
1734 - def constraint_name(self, table, fieldname):
1735 return '%s_%s__constraint' % (table,fieldname)
1736
1737 - def create_sequence_and_triggers(self, query, table, **args):
1738 self.execute(query)
1739
    def log_execute(self, *a, **b):
        """Execute a statement on the cursor, recording it and its timing.

        Stores the SQL in db._lastsql, appends (sql, elapsed) to
        db._timings (bounded by TIMINGSSIZE), and optionally logs it when
        db._debug is on. Returns the driver's execute() result, or None
        when there is no connection.
        """
        if not self.connection: return None
        command = a[0]
        if self.db._debug:
            LOGGER.debug('SQL: %s' % command)
        self.db._lastsql = command
        t0 = time.time()
        ret = self.cursor.execute(*a, **b)
        self.db._timings.append((command,time.time()-t0))
        # keep only the most recent TIMINGSSIZE entries
        del self.db._timings[:-TIMINGSSIZE]
        return ret
1751
1752 - def execute(self, *a, **b):
1753 return self.log_execute(*a, **b)
1754
1755 - def represent(self, obj, fieldtype):
1756 field_is_type = fieldtype.startswith 1757 if isinstance(obj, CALLABLETYPES): 1758 obj = obj() 1759 if isinstance(fieldtype, SQLCustomType): 1760 value = fieldtype.encoder(obj) 1761 if fieldtype.type in ('string','text', 'json'): 1762 return self.adapt(value) 1763 return value 1764 if isinstance(obj, (Expression, Field)): 1765 return str(obj) 1766 if field_is_type('list:'): 1767 if not obj: 1768 obj = [] 1769 elif not isinstance(obj, (list, tuple)): 1770 obj = [obj] 1771 if field_is_type('list:string'): 1772 obj = map(str,obj) 1773 else: 1774 obj = map(int,obj) 1775 # we don't want to bar_encode json objects 1776 if isinstance(obj, (list, tuple)) and (not fieldtype == "json"): 1777 obj = bar_encode(obj) 1778 if obj is None: 1779 return 'NULL' 1780 if obj == '' and not fieldtype[:2] in ['st', 'te', 'js', 'pa', 'up']: 1781 return 'NULL' 1782 r = self.represent_exceptions(obj, fieldtype) 1783 if not r is None: 1784 return r 1785 if fieldtype == 'boolean': 1786 if obj and not str(obj)[:1].upper() in '0F': 1787 return self.smart_adapt(self.TRUE) 1788 else: 1789 return self.smart_adapt(self.FALSE) 1790 if fieldtype == 'id' or fieldtype == 'integer': 1791 return str(int(obj)) 1792 if field_is_type('decimal'): 1793 return str(obj) 1794 elif field_is_type('reference'): # reference 1795 if fieldtype.find('.')>0: 1796 return repr(obj) 1797 elif isinstance(obj, (Row, Reference)): 1798 return str(obj['id']) 1799 return str(int(obj)) 1800 elif fieldtype == 'double': 1801 return repr(float(obj)) 1802 if isinstance(obj, unicode): 1803 obj = obj.encode(self.db_codec) 1804 if fieldtype == 'blob': 1805 obj = base64.b64encode(str(obj)) 1806 elif fieldtype == 'date': 1807 if isinstance(obj, (datetime.date, datetime.datetime)): 1808 obj = obj.isoformat()[:10] 1809 else: 1810 obj = str(obj) 1811 elif fieldtype == 'datetime': 1812 if isinstance(obj, datetime.datetime): 1813 obj = obj.isoformat()[:19].replace('T',' ') 1814 elif isinstance(obj, datetime.date): 1815 obj = 
obj.isoformat()[:10]+' 00:00:00' 1816 else: 1817 obj = str(obj) 1818 elif fieldtype == 'time': 1819 if isinstance(obj, datetime.time): 1820 obj = obj.isoformat()[:10] 1821 else: 1822 obj = str(obj) 1823 elif fieldtype == 'json': 1824 if not self.native_json: 1825 if have_serializers: 1826 obj = serializers.json(obj) 1827 elif simplejson: 1828 obj = simplejson.dumps(items) 1829 else: 1830 raise RuntimeError("missing simplejson") 1831 if not isinstance(obj,bytes): 1832 obj = bytes(obj) 1833 try: 1834 obj.decode(self.db_codec) 1835 except: 1836 obj = obj.decode('latin1').encode(self.db_codec) 1837 return self.adapt(obj)
1838
1839 - def represent_exceptions(self, obj, fieldtype):
1840 return None
1841
1842 - def lastrowid(self, table):
1843 return None
1844
1845 - def integrity_error_class(self):
1846 return type(None)
1847
1848 - def rowslice(self, rows, minimum=0, maximum=None):
1849 """ 1850 By default this function does nothing; 1851 overload when db does not do slicing. 1852 """ 1853 return rows
1854
    def parse_value(self, value, field_type, blob_decode=True):
        """Convert a raw DB value to its Python representation.

        Decodes byte strings using the db codec, applies SQLCustomType
        decoders, passes through string-ish/geo/None values, and otherwise
        dispatches via self.parsemap keyed on the base type keyword.
        """
        if field_type != 'blob' and isinstance(value, str):
            try:
                value = value.decode(self.db._db_codec)
            except Exception:
                pass
        if isinstance(value, unicode):
            value = value.encode('utf-8')
        if isinstance(field_type, SQLCustomType):
            value = field_type.decoder(value)
        if not isinstance(field_type, str) or value is None:
            return value
        elif field_type in ('string', 'text', 'password', 'upload', 'dict'):
            return value
        elif field_type.startswith('geo'):
            return value
        elif field_type == 'blob' and not blob_decode:
            return value
        else:
            # e.g. 'decimal(10,2)' -> 'decimal'
            key = REGEX_TYPE.match(field_type).group(0)
            return self.parsemap[key](value,field_type)
1876
1877 - def parse_reference(self, value, field_type):
1878 referee = field_type[10:].strip() 1879 if not '.' in referee: 1880 value = Reference(value) 1881 value._table, value._record = self.db[referee], None 1882 return value
1883
1884 - def parse_boolean(self, value, field_type):
1885 return value == True or str(value)[:1].lower() == 't'
1886
1887 - def parse_date(self, value, field_type):
1888 if isinstance(value, datetime.datetime): 1889 return value.date() 1890 if not isinstance(value, (datetime.date,datetime.datetime)): 1891 (y, m, d) = map(int, str(value)[:10].strip().split('-')) 1892 value = datetime.date(y, m, d) 1893 return value
1894
1895 - def parse_time(self, value, field_type):
1896 if not isinstance(value, datetime.time): 1897 time_items = map(int,str(value)[:8].strip().split(':')[:3]) 1898 if len(time_items) == 3: 1899 (h, mi, s) = time_items 1900 else: 1901 (h, mi, s) = time_items + [0] 1902 value = datetime.time(h, mi, s) 1903 return value
1904
    def parse_datetime(self, value, field_type):
        """Coerce DB output to a naive datetime.

        Accepts 'YYYY-MM-DD[ HH:MM:SS][(.ffffff)][+/-HH:MM]' strings; a
        trailing UTC offset is folded into the result (the fractional part
        before the sign, if any, is discarded).
        """
        if not isinstance(value, datetime.datetime):
            value = str(value)
            date_part,time_part,timezone = value[:10],value[11:19],value[19:]
            if '+' in timezone:
                ms,tz = timezone.split('+')
                h,m = tz.split(':')
                dt = datetime.timedelta(seconds=3600*int(h)+60*int(m))
            elif '-' in timezone:
                ms,tz = timezone.split('-')
                h,m = tz.split(':')
                dt = -datetime.timedelta(seconds=3600*int(h)+60*int(m))
            else:
                dt = None
            (y, m, d) = map(int,date_part.split('-'))
            # missing time components default to zero
            time_parts = time_part and time_part.split(':')[:3] or (0,0,0)
            while len(time_parts)<3: time_parts.append(0)
            time_items = map(int,time_parts)
            (h, mi, s) = time_items
            value = datetime.datetime(y, m, d, h, mi, s)
            if dt:
                value = value + dt
        return value
1928
1929 - def parse_blob(self, value, field_type):
1930 return base64.b64decode(str(value))
1931
def parse_decimal(self, value, field_type):
    """Coerce a DB value into decimal.Decimal honouring the declared
    scale, e.g. field_type 'decimal(10,2)' -> 2 decimal places."""
    scale = int(field_type[8:-1].split(',')[-1])
    if self.dbengine in ('sqlite', 'spatialite'):
        # sqlite returns floats; format first so the declared scale
        # is preserved in the Decimal
        value = ('%.' + str(scale) + 'f') % value
    if isinstance(value, decimal.Decimal):
        return value
    return decimal.Decimal(str(value))
1939
def parse_list_integers(self, value, field_type):
    """Decode a list:integer column; the Google datastore already
    returns native lists, other engines store a bar-encoded string."""
    if self.dbengine == 'google:datastore':
        return value
    return bar_decode_integer(value)
1944
def parse_list_references(self, value, field_type):
    """Decode a list:reference column into a list of Reference objects."""
    if self.dbengine != 'google:datastore':
        value = bar_decode_integer(value)
    inner_type = field_type[5:]  # strip the leading 'list:'
    return [self.parse_reference(item, inner_type) for item in value]
1949
def parse_list_strings(self, value, field_type):
    """Decode a list:string column; the Google datastore already
    returns native lists, other engines store a bar-encoded string."""
    if self.dbengine == 'google:datastore':
        return value
    return bar_decode_string(value)
1954
def parse_id(self, value, field_type):
    """Coerce an id column value to int."""
    record_id = int(value)
    return record_id
1957
def parse_integer(self, value, field_type):
    """Coerce an integer/bigint column value to int."""
    result = int(value)
    return result
1960
def parse_double(self, value, field_type):
    """Coerce a float/double column value to float."""
    result = float(value)
    return result
1963
def parse_json(self, value, field_type):
    """Decode a JSON column.

    When the backend has a native json type the driver already decoded
    the value; otherwise decode the stored string with the available
    JSON library.
    """
    if self.native_json:
        return value
    if not isinstance(value, basestring):
        raise RuntimeError('json data not a string')
    if isinstance(value, unicode):
        value = value.encode('utf-8')
    if have_serializers:
        return serializers.loads_json(value)
    elif simplejson:
        return simplejson.loads(value)
    else:
        raise RuntimeError("missing simplejson")
1977
def build_parsemap(self):
    """Populate self.parsemap: field-type keyword -> parser method."""
    parsers = (
        ('id', self.parse_id),
        ('integer', self.parse_integer),
        ('bigint', self.parse_integer),
        ('float', self.parse_double),
        ('double', self.parse_double),
        ('reference', self.parse_reference),
        ('boolean', self.parse_boolean),
        ('date', self.parse_date),
        ('time', self.parse_time),
        ('datetime', self.parse_datetime),
        ('blob', self.parse_blob),
        ('decimal', self.parse_decimal),
        ('json', self.parse_json),
        ('list:integer', self.parse_list_integers),
        ('list:reference', self.parse_list_references),
        ('list:string', self.parse_list_strings),
        )
    self.parsemap = dict(parsers)
1997
def parse(self, rows, fields, colnames, blob_decode=True,
          cacheable = False):
    """Convert raw driver rows into a Rows object of Row records.

    rows     : raw records from the DB driver
    fields   : Field objects matching colnames (used for '_extra' cols)
    colnames : column labels; 'table.field' names are grouped per table,
               anything else lands under row['_extra']
    blob_decode : passed through to parse_value for blob columns
    cacheable   : when False, id columns get update_record/delete_record
                  helpers and referee LazySets attached
    """
    self.build_parsemap()
    db = self.db
    virtualtables = []
    new_rows = []
    # precompute (tablename, fieldname, table, field, type) per column
    # so the per-row loop avoids repeated regex/table lookups
    tmps = []
    for colname in colnames:
        if not REGEX_TABLE_DOT_FIELD.match(colname):
            tmps.append(None)
        else:
            (tablename, fieldname) = colname.split('.')
            table = db[tablename]
            field = table[fieldname]
            ft = field.type
            tmps.append((tablename,fieldname,table,field,ft))
    for (i,row) in enumerate(rows):
        new_row = Row()
        for (j,colname) in enumerate(colnames):
            value = row[j]
            tmp = tmps[j]
            if tmp:
                (tablename,fieldname,table,field,ft) = tmp
                if tablename in new_row:
                    colset = new_row[tablename]
                else:
                    # first column of this table in the row: create its
                    # sub-Row and remember the table for virtual fields
                    colset = new_row[tablename] = Row()
                    if tablename not in virtualtables:
                        virtualtables.append(tablename)
                value = self.parse_value(value,ft,blob_decode)
                if field.filter_out:
                    value = field.filter_out(value)
                colset[fieldname] = value

                # for backward compatibility
                if ft=='id' and fieldname!='id' and \
                        not 'id' in table.fields:
                    colset['id'] = value

                if ft == 'id' and not cacheable:
                    # temporary hack to deal with
                    # GoogleDatastoreAdapter
                    # references
                    if isinstance(self, GoogleDatastoreAdapter):
                        id = value.key().id_or_name()
                        colset[fieldname] = id
                        colset.gae_item = value
                    else:
                        id = value
                    # attach record-level helpers bound to this id
                    colset.update_record = RecordUpdater(colset,table,id)
                    colset.delete_record = RecordDeleter(table,id)
                    for rfield in table._referenced_by:
                        referee_link = db._referee_name and \
                            db._referee_name % dict(
                            table=rfield.tablename,field=rfield.name)
                        if referee_link and not referee_link in colset:
                            colset[referee_link] = LazySet(rfield,id)
            else:
                # column name is not 'table.field' (expression, alias...):
                # keep it under '_extra', and expose 'AS <name>' aliases
                # as row attributes too
                if not '_extra' in new_row:
                    new_row['_extra'] = Row()
                new_row['_extra'][colname] = \
                    self.parse_value(value,
                                     fields[j].type,blob_decode)
                new_column_name = \
                    REGEX_SELECT_AS_PARSER.search(colname)
                if not new_column_name is None:
                    column_name = new_column_name.groups(0)
                    setattr(new_row,column_name[0],value)
        new_rows.append(new_row)
    rowsobj = Rows(db, new_rows, colnames, rawrows=rows)

    for tablename in virtualtables:
        ### new style virtual fields
        table = db[tablename]
        fields_virtual = [(f,v) for (f,v) in table.iteritems()
                          if isinstance(v,FieldVirtual)]
        fields_lazy = [(f,v) for (f,v) in table.iteritems()
                       if isinstance(v,FieldMethod)]
        if fields_virtual or fields_lazy:
            for row in rowsobj.records:
                box = row[tablename]
                for f,v in fields_virtual:
                    box[f] = v.f(row)
                for f,v in fields_lazy:
                    box[f] = (v.handler or VirtualCommand)(v.f,row)

        ### old style virtual fields
        for item in table.virtualfields:
            try:
                rowsobj = rowsobj.setvirtualfields(**{tablename:item})
            except (KeyError, AttributeError):
                # to avoid breaking virtualfields when partial select
                pass
    return rowsobj
2092
def common_filter(self, query, tablenames):
    """AND each table's user-defined common filter and multi-tenant
    filter onto *query* and return the combined query."""
    tenant_fieldname = self.db._request_tenant

    for tablename in tablenames:
        table = self.db[tablename]

        # deal with user provided filters
        if table._common_filter is not None:
            query = query & table._common_filter(query)

        # deal with multi_tenant filters: restrict rows to the
        # tenant field's default value, if one is set
        if tenant_fieldname in table:
            default = table[tenant_fieldname].default
            if default is not None:
                tenant_query = table[tenant_fieldname] == default
                if query is None:
                    query = tenant_query
                else:
                    query = query & tenant_query
    return query
2113
def CASE(self, query, t, f):
    """Build a SQL 'CASE WHEN <query> THEN <t> ELSE <f> END' Expression."""
    def sql_literal(x):
        # map plain python values onto DAL types for represent()
        type_names = {type(True): 'boolean',
                      type(0): 'integer',
                      type(1.0): 'double'}
        if x is None:
            return 'NULL'
        if isinstance(x, Expression):
            return str(x)
        return self.represent(x, type_names.get(type(x), 'string'))
    sql = 'CASE WHEN %s THEN %s ELSE %s END' % (
        self.expand(query), sql_literal(t), sql_literal(f))
    return Expression(self.db, sql)
###################################################################################
# List of all the available adapters; they all extend BaseAdapter.
###################################################################################

class SQLiteAdapter(BaseAdapter):
    """Adapter for SQLite databases (sqlite2/sqlite3 drivers)."""
    drivers = ('sqlite2','sqlite3')

    can_select_for_update = None # support ourselves with BEGIN TRANSACTION

    def EXTRACT(self,field,what):
        # delegate date-part extraction to the registered web2py_extract UDF
        return "web2py_extract('%s',%s)" % (what, self.expand(field))

    @staticmethod
    def web2py_extract(lookup, s):
        """SQLite UDF: extract a date/time component from ISO string *s*.

        *lookup* is one of year/month/day/hour/minute/second, or 'epoch'
        for a unix timestamp. Returns None on malformed input.
        """
        # (start, end) slice positions within 'YYYY-MM-DD HH:MM:SS'
        table = {
            'year': (0, 4),
            'month': (5, 7),
            'day': (8, 10),
            'hour': (11, 13),
            'minute': (14, 16),
            'second': (17, 19),
            }
        try:
            if lookup != 'epoch':
                (i, j) = table[lookup]
                return int(s[i:j])
            else:
                return time.mktime(datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S').timetuple())
        except:
            return None

    @staticmethod
    def web2py_regexp(expression, item):
        """SQLite UDF backing the SQL REGEXP operator."""
        return re.compile(expression).search(item) is not None

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Open (or prepare to open) a SQLite database.

        'sqlite:memory' URIs map to ':memory:'; relative paths are
        resolved against the working folder. Pooling is disabled
        (pool_size is forced to 0).
        """
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.pool_size = 0
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            dbpath = ':memory:'
        else:
            dbpath = uri.split('://',1)[1]
            if dbpath[0] != '/':
                if PYTHON_VERSION == 2:
                    dbpath = pjoin(
                        self.folder.decode(path_encoding).encode('utf8'), dbpath)
                else:
                    dbpath = pjoin(self.folder, dbpath)
        if not 'check_same_thread' in driver_args:
            driver_args['check_same_thread'] = False
        if not 'detect_types' in driver_args and do_connect:
            driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
        def connector(dbpath=dbpath, driver_args=driver_args):
            return self.driver.Connection(dbpath, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # register the python UDFs used by EXTRACT and REGEXP
        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)
        self.connection.create_function("REGEXP", 2,
                                        SQLiteAdapter.web2py_regexp)

    def _truncate(self, table, mode=''):
        # sqlite has no TRUNCATE; delete rows and reset the autoincrement
        tablename = table._tablename
        return ['DELETE FROM %s;' % tablename,
                "DELETE FROM sqlite_sequence WHERE name='%s';" % tablename]

    def lastrowid(self, table):
        return self.cursor.lastrowid

    def REGEXP(self,first,second):
        return '(%s REGEXP %s)' % (self.expand(first),
                                   self.expand(second,'string'))

    def select(self, query, fields, attributes):
        """
        Simulate SELECT ... FOR UPDATE with BEGIN IMMEDIATE TRANSACTION.
        Note that the entire database, rather than one record, is locked
        (it will be locked eventually anyway by the following UPDATE).
        """
        if attributes.get('for_update', False) and not 'cache' in attributes:
            self.execute('BEGIN IMMEDIATE TRANSACTION;')
        return super(SQLiteAdapter, self).select(query, fields, attributes)
2218
class SpatiaLiteAdapter(SQLiteAdapter):
    """SQLite adapter with the SpatiaLite GIS extension loaded."""
    drivers = ('sqlite3','sqlite2')

    types = copy.copy(BaseAdapter.types)
    types.update(geometry='GEOMETRY')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326, after_connection=None):
        """Open a SpatiaLite database; *srid* is the default spatial
        reference id used by represent() for geometry fields."""
        self.db = db
        self.dbengine = "spatialite"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.pool_size = 0
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        self.srid = srid
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('spatialite:memory'):
            dbpath = ':memory:'
        else:
            dbpath = uri.split('://',1)[1]
            if dbpath[0] != '/':
                dbpath = pjoin(
                    self.folder.decode(path_encoding).encode('utf8'), dbpath)
        if not 'check_same_thread' in driver_args:
            driver_args['check_same_thread'] = False
        if not 'detect_types' in driver_args and do_connect:
            driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
        def connector(dbpath=dbpath, driver_args=driver_args):
            return self.driver.Connection(dbpath, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        """Load the SpatiaLite shared library and register the UDFs."""
        self.connection.enable_load_extension(True)
        # for Windows, rename libspatialite-2.dll to libspatialite.dll
        # Linux uses libspatialite.so
        # Mac OS X uses libspatialite.dylib
        libspatialite = SPATIALLIBS[platform.system()]
        # BUGFIX: the library name must be interpolated into the SQL
        # string BEFORE execute(); previously the '%' was applied to the
        # return value of execute(), raising TypeError and never loading
        # the extension.
        self.execute('SELECT load_extension("%s");' % libspatialite)

        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)
        self.connection.create_function("REGEXP", 2,
                                        SQLiteAdapter.web2py_regexp)

    # GIS functions

    def ST_ASGEOJSON(self, first, second):
        return 'AsGeoJSON(%s,%s,%s)' %(self.expand(first),
                                       second['precision'], second['options'])

    def ST_ASTEXT(self, first):
        return 'AsText(%s)' %(self.expand(first))

    def ST_CONTAINS(self, first, second):
        return 'Contains(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_DISTANCE(self, first, second):
        return 'Distance(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_EQUALS(self, first, second):
        return 'Equals(%s,%s)' %(self.expand(first),
                                 self.expand(second, first.type))

    def ST_INTERSECTS(self, first, second):
        return 'Intersects(%s,%s)' %(self.expand(first),
                                     self.expand(second, first.type))

    def ST_OVERLAPS(self, first, second):
        return 'Overlaps(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_SIMPLIFY(self, first, second):
        return 'Simplify(%s,%s)' %(self.expand(first),
                                   self.expand(second, 'double'))

    def ST_TOUCHES(self, first, second):
        return 'Touches(%s,%s)' %(self.expand(first),
                                  self.expand(second, first.type))

    def ST_WITHIN(self, first, second):
        return 'Within(%s,%s)' %(self.expand(first),
                                 self.expand(second, first.type))

    def represent(self, obj, fieldtype):
        """Render geometry values as ST_GeomFromText(...) literals,
        reading the srid from the field type when declared."""
        field_is_type = fieldtype.startswith
        if field_is_type('geo'):
            srid = 4326 # Spatialite default srid for geometry
            geotype, parms = fieldtype[:-1].split('(')
            parms = parms.split(',')
            if len(parms) >= 2:
                schema, srid = parms[:2]
            # if field_is_type('geometry'):
            value = "ST_GeomFromText('%s',%s)" %(obj, srid)
            # elif field_is_type('geography'):
            #     value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
            # else:
            #     raise SyntaxError, 'Invalid field type %s' %fieldtype
            return value
        return BaseAdapter.represent(self, obj, fieldtype)
2326
class JDBCSQLiteAdapter(SQLiteAdapter):
    """SQLite adapter for Jython, connecting over zxJDBC."""
    drivers = ('zxJDBC_sqlite',)

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            dbpath = ':memory:'
        else:
            dbpath = uri.split('://',1)[1]
            if dbpath[0] != '/':
                dbpath = pjoin(
                    self.folder.decode(path_encoding).encode('utf8'), dbpath)
        def connector(dbpath=dbpath,driver_args=driver_args):
            # connect through the JDBC bridge rather than a native driver
            return self.driver.connect(
                self.driver.getConnection('jdbc:sqlite:'+dbpath),
                **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # FIXME http://www.zentus.com/sqlitejdbc/custom_functions.html for UDFs
        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)

    def execute(self, a):
        return self.log_execute(a)
2366
class MySQLAdapter(BaseAdapter):
    """Adapter for MySQL databases (MySQLdb or pymysql drivers)."""
    drivers = ('MySQLdb','pymysql')

    maxcharlength = 255
    commit_on_alter_table = True
    support_distributed_transaction = True
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONGTEXT',
        'json': 'LONGTEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONGBLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'DATETIME',
        'id': 'INT AUTO_INCREMENT NOT NULL',
        'reference': 'INT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONGTEXT',
        'list:string': 'LONGTEXT',
        'list:reference': 'LONGTEXT',
        'big-id': 'BIGINT AUTO_INCREMENT NOT NULL',
        'big-reference': 'BIGINT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def varquote(self,name):
        # MySQL quotes identifiers with backticks
        return varquote_aux(name,'`%s`')

    def RANDOM(self):
        return 'RAND()'

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field),
                                        parameters[0], parameters[1])

    def EPOCH(self, first):
        return "UNIX_TIMESTAMP(%s)" % self.expand(first)

    def REGEXP(self,first,second):
        return '(%s REGEXP %s)' % (self.expand(first),
                                   self.expand(second,'string'))

    def _drop(self,table,mode):
        # breaks db integrity but without this mysql does not drop table
        return ['SET FOREIGN_KEY_CHECKS=0;','DROP TABLE %s;' % table,
                'SET FOREIGN_KEY_CHECKS=1;']

    # XA distributed-transaction hooks
    def distributed_transaction_begin(self,key):
        self.execute('XA START;')

    def prepare(self,key):
        self.execute("XA END;")
        self.execute("XA PREPARE;")

    def commit_prepared(self,key):
        # fixed parameter typo ('ley' -> 'key'); callers pass positionally
        self.execute("XA COMMIT;")

    def rollback_prepared(self,key):
        self.execute("XA ROLLBACK;")

    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse a mysql://user:password@host:port/db?set_encoding=...
        URI and prepare the driver connection."""
        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = int(m.group('port') or '3306')
        charset = m.group('charset') or 'utf8'
        # user/password were already passed through credential_decoder
        # above; the previous code decoded them a second time here,
        # which corrupted credentials under any non-identity decoder
        driver_args.update(db=db,
                           user=user,
                           passwd=password,
                           host=host,
                           port=port,
                           charset=charset)

        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")

    def lastrowid(self,table):
        self.execute('select last_insert_id();')
        return int(self.cursor.fetchone()[0])
2487
class PostgreSQLAdapter(BaseAdapter):
    """Adapter for PostgreSQL databases (psycopg2 or pg8000 drivers)."""
    drivers = ('psycopg2','pg8000')

    support_distributed_transaction = True
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'SERIAL PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'GEOMETRY',
        'geography': 'GEOGRAPHY',
        'big-id': 'BIGSERIAL PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def varquote(self,name):
        # PostgreSQL quotes identifiers with double quotes
        return varquote_aux(name,'"%s"')

    def adapt(self,obj):
        """Quote a python value as a SQL literal, driver-appropriately."""
        if self.driver_name == 'psycopg2':
            return psycopg2_adapt(obj).getquoted()
        elif self.driver_name == 'pg8000':
            return "'%s'" % str(obj).replace("%","%%").replace("'","''")
        else:
            return "'%s'" % str(obj).replace("'","''")

    def sequence_name(self,table):
        # NOTE: mixed-case name matches the sequences web2py creates
        return '%s_id_Seq' % table

    def RANDOM(self):
        return 'RANDOM()'

    def ADD(self, first, second):
        # '+' means concatenation (||) for textual types in postgres
        t = first.type
        if t in ('text','string','password', 'json', 'upload','blob'):
            return '(%s || %s)' % (self.expand(first), self.expand(second, t))
        else:
            return '(%s + %s)' % (self.expand(first), self.expand(second, t))

    # two-phase-commit hooks
    def distributed_transaction_begin(self,key):
        return

    def prepare(self,key):
        self.execute("PREPARE TRANSACTION '%s';" % key)

    def commit_prepared(self,key):
        self.execute("COMMIT PREPARED '%s';" % key)

    def rollback_prepared(self,key):
        self.execute("ROLLBACK PREPARED '%s';" % key)

    def create_sequence_and_triggers(self, query, table, **args):
        # following lines should only be executed if table._sequence_name does not exist
        # self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        # self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
        #              % (table._tablename, table._fieldname, table._sequence_name))
        self.execute(query)

    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        """Parse a postgres://user:password@host:port/db?sslmode=... URI
        and prepare the driver connection. *srid* is the default spatial
        reference id used for geometry fields."""
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.srid = srid
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = m.group('port') or '5432'
        sslmode = m.group('sslmode')
        # build a libpq-style connection string
        if sslmode:
            msg = ("dbname='%s' user='%s' host='%s' "
                   "port=%s password='%s' sslmode='%s'") \
                   % (db, user, host, port, password, sslmode)
        else:
            msg = ("dbname='%s' user='%s' host='%s' "
                   "port=%s password='%s'") \
                   % (db, user, host, port, password)
        # choose diver according uri
        self.__version__ = "%s %s" % (self.driver.__name__, self.driver.__version__)
        def connector(msg=msg,driver_args=driver_args):
            return self.driver.connect(msg,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        self.connection.set_client_encoding('UTF8')
        self.execute("SET standard_conforming_strings=on;")
        self.try_json()

    def lastrowid(self,table):
        self.execute("select currval('%s')" % table._sequence_name)
        return int(self.cursor.fetchone()[0])

    def try_json(self):
        # check JSON data type support
        # (to be added to after_connection)
        if self.driver_name == "pg8000":
            supports_json = self.connection.server_version >= "9.2.0"
        elif (self.driver_name == "psycopg2") and \
             (self.driver.__version__ >= "2.0.12"):
            supports_json = self.connection.server_version >= 90200
        elif self.driver_name == "zxJDBC":
            supports_json = self.connection.dbversion >= "9.2.0"
        else: supports_json = None
        if supports_json: self.types["json"] = "JSON"
        else: LOGGER.debug("Your database version does not support the JSON data type (using TEXT instead)")

    def LIKE(self,first,second):
        # non-text operands are cast to CHAR so LIKE applies
        args = (self.expand(first), self.expand(second,'string'))
        if not first.type in ('string', 'text', 'json'):
            return '(CAST(%s AS CHAR(%s)) LIKE %s)' % (args[0], first.length, args[1])
        else:
            return '(%s LIKE %s)' % args

    def ILIKE(self,first,second):
        # NOTE: the cast branch falls back to case-sensitive LIKE
        args = (self.expand(first), self.expand(second,'string'))
        if not first.type in ('string', 'text', 'json'):
            return '(CAST(%s AS CHAR(%s)) LIKE %s)' % (args[0], first.length, args[1])
        else:
            return '(%s ILIKE %s)' % args

    def REGEXP(self,first,second):
        return '(%s ~ %s)' % (self.expand(first),
                              self.expand(second,'string'))

    def STARTSWITH(self,first,second):
        return '(%s ILIKE %s)' % (self.expand(first),
                                  self.expand(second+'%','string'))

    def ENDSWITH(self,first,second):
        return '(%s ILIKE %s)' % (self.expand(first),
                                  self.expand('%'+second,'string'))

    def CONTAINS(self,first,second,case_sensitive=False):
        # list:* columns are stored bar-delimited, hence the '|' wrapping
        if first.type in ('string','text', 'json'):
            second = '%'+str(second).replace('%','%%')+'%'
        elif first.type.startswith('list:'):
            second = '%|'+str(second).replace('|','||').replace('%','%%')+'|%'
        op = case_sensitive and self.LIKE or self.ILIKE
        return op(first,second)

    # GIS functions

    def ST_ASGEOJSON(self, first, second):
        """
        http://postgis.org/docs/ST_AsGeoJSON.html
        """
        return 'ST_AsGeoJSON(%s,%s,%s,%s)' %(second['version'],
            self.expand(first), second['precision'], second['options'])

    def ST_ASTEXT(self, first):
        """
        http://postgis.org/docs/ST_AsText.html
        """
        return 'ST_AsText(%s)' %(self.expand(first))

    def ST_X(self, first):
        """
        http://postgis.org/docs/ST_X.html
        """
        return 'ST_X(%s)' %(self.expand(first))

    def ST_Y(self, first):
        """
        http://postgis.org/docs/ST_Y.html
        """
        return 'ST_Y(%s)' %(self.expand(first))

    def ST_CONTAINS(self, first, second):
        """
        http://postgis.org/docs/ST_Contains.html
        """
        return 'ST_Contains(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_DISTANCE(self, first, second):
        """
        http://postgis.org/docs/ST_Distance.html
        """
        return 'ST_Distance(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_EQUALS(self, first, second):
        """
        http://postgis.org/docs/ST_Equals.html
        """
        return 'ST_Equals(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_INTERSECTS(self, first, second):
        """
        http://postgis.org/docs/ST_Intersects.html
        """
        return 'ST_Intersects(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_OVERLAPS(self, first, second):
        """
        http://postgis.org/docs/ST_Overlaps.html
        """
        return 'ST_Overlaps(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_SIMPLIFY(self, first, second):
        """
        http://postgis.org/docs/ST_Simplify.html
        """
        return 'ST_Simplify(%s,%s)' %(self.expand(first), self.expand(second, 'double'))

    def ST_TOUCHES(self, first, second):
        """
        http://postgis.org/docs/ST_Touches.html
        """
        return 'ST_Touches(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_WITHIN(self, first, second):
        """
        http://postgis.org/docs/ST_Within.html
        """
        return 'ST_Within(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def represent(self, obj, fieldtype):
        """Render geometry/geography values as PostGIS literals, reading
        the srid from the field type declaration when present."""
        field_is_type = fieldtype.startswith
        if field_is_type('geo'):
            srid = 4326 # postGIS default srid for geometry
            geotype, parms = fieldtype[:-1].split('(')
            parms = parms.split(',')
            if len(parms) >= 2:
                schema, srid = parms[:2]
            if field_is_type('geometry'):
                value = "ST_GeomFromText('%s',%s)" %(obj, srid)
            elif field_is_type('geography'):
                value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
            # else:
            #     raise SyntaxError('Invalid field type %s' %fieldtype)
            return value
        return BaseAdapter.represent(self, obj, fieldtype)
2759
class NewPostgreSQLAdapter(PostgreSQLAdapter):
    """PostgreSQL adapter that stores list:* fields as native arrays
    (BIGINT[]/TEXT[]) instead of bar-encoded TEXT."""
    drivers = ('psycopg2','pg8000')

    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'SERIAL PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BIGINT[]',
        'list:string': 'TEXT[]',
        'list:reference': 'BIGINT[]',
        'geometry': 'GEOMETRY',
        'geography': 'GEOGRAPHY',
        'big-id': 'BIGSERIAL PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    # arrays come back from the driver already decoded, so the
    # bar-decoding of the parent class is skipped
    def parse_list_integers(self, value, field_type):
        return value

    def parse_list_references(self, value, field_type):
        return [self.parse_reference(r, field_type[5:]) for r in value]

    def parse_list_strings(self, value, field_type):
        return value

    def represent(self, obj, fieldtype):
        """Render list:* values as ARRAY[...] literals."""
        field_is_type = fieldtype.startswith
        if field_is_type('list:'):
            if not obj:
                obj = []
            elif not isinstance(obj, (list, tuple)):
                obj = [obj]
            if field_is_type('list:string'):
                obj = map(str,obj)
            else:
                obj = map(int,obj)
            return 'ARRAY[%s]' % ','.join(repr(item) for item in obj)
        return BaseAdapter.represent(self, obj, fieldtype)
2812
class JDBCPostgreSQLAdapter(PostgreSQLAdapter):
    """PostgreSQL adapter for Jython, connecting over zxJDBC."""
    drivers = ('zxJDBC',)

    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None ):
        """Parse a postgres://user:password@host:port/db URI and
        prepare a JDBC connection."""
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = m.group('port') or '5432'
        # (jdbc url, user, password) triple passed to the driver
        msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password)
        def connector(msg=msg,driver_args=driver_args):
            return self.driver.connect(*msg,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        self.connection.set_client_encoding('UTF8')
        self.execute('BEGIN;')
        self.execute("SET CLIENT_ENCODING TO 'UNICODE';")
        self.try_json()
2859
class OracleAdapter(BaseAdapter):
    """Adapter for Oracle via cx_Oracle.

    Oracle has no autoincrement columns, so each table gets a sequence plus
    a BEFORE INSERT trigger (see create_sequence_and_triggers).  CLOB
    literals are rewritten into bind variables by execute().
    """
    drivers = ('cx_Oracle',)

    commit_on_alter_table = False
    # web2py field type -> Oracle column type
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR2(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR2(%(length)s)',
        'blob': 'CLOB',
        'upload': 'VARCHAR2(%(length)s)',
        'integer': 'INT',
        'bigint': 'NUMBER',
        'float': 'FLOAT',
        'double': 'BINARY_DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATE',
        'id': 'NUMBER PRIMARY KEY',
        'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'NUMBER PRIMARY KEY',
        'big-reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def sequence_name(self,tablename):
        # one sequence per table supplies the id values
        return '%s_sequence' % tablename

    def trigger_name(self,tablename):
        return '%s_trigger' % tablename

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'dbms_random.value'

    def NOT_NULL(self,default,field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def _drop(self,table,mode):
        """Drop the table and its id sequence."""
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table, mode), 'DROP SEQUENCE %s;' % sequence_name]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Emulate LIMIT/OFFSET with nested ROWNUM subqueries (pre-12c Oracle)."""
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def constraint_name(self, tablename, fieldname):
        # Oracle identifiers are limited to 30 characters; shorten if needed
        constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname)
        if len(constraint_name)>30:
            constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7])
        return constraint_name

    def represent_exceptions(self, obj, fieldtype):
        """Oracle-specific literals; return None to use the generic path."""
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
            # ':CLOB(...)' is a placeholder later turned into a bind var by execute()
            return ":CLOB('%s')" % obj
        elif fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Store connection settings and (optionally) connect.

        The part of *uri* after 'oracle://' is passed verbatim to
        cx_Oracle.connect().
        """
        self.db = db
        self.dbengine = "oracle"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        if not 'threaded' in driver_args:
            # needed when connections are shared across threads by the pool
            driver_args['threaded']=True
        def connector(uri=ruri,driver_args=driver_args):
            return self.driver.connect(uri,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        """Force ISO-like date/timestamp formats for the session."""
        self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
        self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")

    # matches the :CLOB('...') placeholders emitted by represent_exceptions
    oracle_fix = re.compile("[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))")

    def execute(self, command, args=None):
        """Execute *command*, converting :CLOB('...') literals to bind args.

        Each placeholder is replaced by a positional bind variable (:1, :2, ...)
        and its unescaped payload appended to *args*.
        """
        args = args or []
        i = 1
        while True:
            m = self.oracle_fix.match(command)
            if not m:
                break
            command = command[:m.start('clob')] + str(i) + command[m.end('clob'):]
            args.append(m.group('clob')[6:-2].replace("''", "'"))
            i += 1
        # Oracle rejects a trailing semicolon on single statements
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command, args)

    def create_sequence_and_triggers(self, query, table, **args):
        """Create the table plus its id sequence and BEFORE INSERT trigger.

        The trigger also resynchronizes the sequence when a row is inserted
        with an explicit id (e.g. during a restore).
        """
        tablename = table._tablename
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE MINVALUE -1;' % sequence_name)
        self.execute("""
CREATE OR REPLACE TRIGGER %(trigger_name)s BEFORE INSERT ON %(tablename)s FOR EACH ROW
DECLARE
    curr_val NUMBER;
    diff_val NUMBER;
    PRAGMA autonomous_transaction;
BEGIN
    IF :NEW.id IS NOT NULL THEN
        EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;
        diff_val := :NEW.id - curr_val - 1;
        IF diff_val != 0 THEN
          EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by '|| diff_val;
          EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;
          EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by 1';
        END IF;
    END IF;
    SELECT %(sequence_name)s.nextval INTO :NEW.id FROM DUAL;
END;
""" % dict(trigger_name=trigger_name, tablename=tablename, sequence_name=sequence_name))

    def lastrowid(self,table):
        """Return the id just assigned by the table's sequence."""
        sequence_name = table._sequence_name
        self.execute('SELECT %s.currval FROM dual;' % sequence_name)
        return int(self.cursor.fetchone()[0])

    #def parse_value(self, value, field_type, blob_decode=True):
    #    if blob_decode and isinstance(value, cx_Oracle.LOB):
    #        try:
    #            value = value.read()
    #        except self.driver.ProgrammingError:
    #            # After a subsequent fetch the LOB value is not valid anymore
    #            pass
    #    return BaseAdapter.parse_value(self, value, field_type, blob_decode)

    def _fetchall(self):
        """Fetch all rows, materializing cx_Oracle LOB columns eagerly.

        LOB handles become invalid after the next fetch, so they must be
        read while iterating.  NOTE(review): references the cx_Oracle module
        directly rather than self.driver -- confirm it is always importable
        when this adapter is active.
        """
        if any(x[1]==cx_Oracle.CLOB for x in self.cursor.description):
            return [tuple([(c.read() if type(c) == cx_Oracle.LOB else c) \
                    for c in r]) for r in self.cursor]
        else:
            return self.cursor.fetchall()
3033
class MSSQLAdapter(BaseAdapter):
    """Adapter for Microsoft SQL Server via pyodbc.

    URI forms:
        mssql://user:password@host[:port]/db[?odbcarg=value&...]
        mssql://dsn            (no '@': the rest is a raw ODBC connection string)
    """
    drivers = ('pyodbc',)

    # web2py field type -> T-SQL column type
    types = {
        'boolean': 'BIT',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'geometry',
        'geography': 'geography',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def concat_add(self,tablename):
        # MSSQL cannot ADD several columns in one ALTER; start a new statement
        return '; ALTER TABLE %s ADD ' % tablename

    def varquote(self,name):
        # T-SQL quotes identifiers with [brackets]
        return varquote_aux(name,'[%s]')

    def EXTRACT(self,field,what):
        return "DATEPART(%s,%s)" % (what, self.expand(field))

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'NEWID()'

    def ALLOW_NULL(self):
        return ' NULL'

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])

    def PRIMARY_KEY(self,key):
        return 'PRIMARY KEY CLUSTERED (%s)' % key

    def AGGREGATE(self, first, what):
        # T-SQL names the LENGTH() aggregate LEN()
        if what == 'LENGTH':
            what = 'LEN'
        return "%s(%s)" % (what, self.expand(first))

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Apply TOP n for the upper bound; the offset is handled in rowslice()."""
        if limitby:
            (lmin, lmax) = limitby
            sql_s += ' TOP %i' % lmax
            if 'GROUP BY' in sql_o:
                # TOP with GROUP BY: drop the trailing ORDER BY clause
                sql_o = sql_o[:sql_o.find('ORDER BY ')]
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    TRUE = 1
    FALSE = 0

    REGEX_DSN = re.compile('^(?P<dsn>.+)$')
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$')
    REGEX_ARGPATTERN = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        """Parse *uri* into an ODBC connection string and (optionally) connect.

        Raises SyntaxError when the URI lacks a user, host or database name.
        """
        self.db = db
        self.dbengine = "mssql"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.srid = srid
        self.find_or_make_work_folder()
        # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
        ruri = uri.split('://',1)[1]
        if '@' not in ruri:
            # no credentials: treat the rest of the URI as a raw ODBC DSN string
            try:
                m = self.REGEX_DSN.match(ruri)
                if not m:
                    raise SyntaxError(
                        'Parsing uri string(%s) has no result' % self.uri)
                dsn = m.group('dsn')
                if not dsn:
                    raise SyntaxError('DSN required')
            except SyntaxError:
                e = sys.exc_info()[1]
                LOGGER.error('NdGpatch error')
                raise e
            # was cnxn = 'DSN=%s' % dsn
            cnxn = dsn
        else:
            m = self.REGEX_URI.match(ruri)
            if not m:
                raise SyntaxError(
                    "Invalid URI string in DAL: %s" % self.uri)
            user = credential_decoder(m.group('user'))
            if not user:
                raise SyntaxError('User required')
            password = credential_decoder(m.group('password'))
            if not password:
                password = ''
            host = m.group('host')
            if not host:
                raise SyntaxError('Host name required')
            db = m.group('db')
            if not db:
                raise SyntaxError('Database name required')
            port = m.group('port') or '1433'
            # Parse the optional url name-value arg pairs after the '?'
            # (in the form of arg1=value1&arg2=value2&...)
            # Default values (drivers like FreeTDS insist on uppercase parameter keys)
            argsdict = { 'DRIVER':'{SQL Server}' }
            urlargs = m.group('urlargs') or ''
            for argmatch in self.REGEX_ARGPATTERN.finditer(urlargs):
                argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue')
            urlargs = ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.iteritems()])
            cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \
                % (host, port, db, user, password, urlargs)
        def connector(cnxn=cnxn,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def lastrowid(self,table):
        #self.execute('SELECT @@IDENTITY;')
        self.execute('SELECT SCOPE_IDENTITY();')
        return int(self.cursor.fetchone()[0])

    def integrity_error_class(self):
        return pyodbc.IntegrityError

    def rowslice(self,rows,minimum=0,maximum=None):
        """Client-side offset: TOP cannot skip rows, so slice here."""
        if maximum is None:
            return rows[minimum:]
        return rows[minimum:maximum]

    def EPOCH(self, first):
        return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)

    # GIS Spatial Extensions

    # No STAsGeoJSON in MSSQL

    def ST_ASTEXT(self, first):
        return '%s.STAsText()' %(self.expand(first))

    def ST_CONTAINS(self, first, second):
        return '%s.STContains(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def ST_DISTANCE(self, first, second):
        return '%s.STDistance(%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_EQUALS(self, first, second):
        return '%s.STEquals(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def ST_INTERSECTS(self, first, second):
        return '%s.STIntersects(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def ST_OVERLAPS(self, first, second):
        return '%s.STOverlaps(%s)=1' %(self.expand(first), self.expand(second, first.type))

    # no STSimplify in MSSQL

    def ST_TOUCHES(self, first, second):
        return '%s.STTouches(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def ST_WITHIN(self, first, second):
        return '%s.STWithin(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def represent(self, obj, fieldtype):
        """Return the SQL literal for *obj*.

        geometry/geography values are wrapped in the matching STGeomFromText
        constructor.  BUGFIX: the previous version unpacked
        fieldtype[:-1].split('(') unconditionally, which raised ValueError for
        the plain 'geometry'/'geography' types (no parenthesized srid), and
        the geography fall-through emitted a geometry:: literal.
        """
        field_is_type = fieldtype.startswith
        if field_is_type('geometry'):
            srid = 0 # MS SQL default srid for geometry
            if '(' in fieldtype:
                parms = fieldtype[:-1].split('(')[1]
                if parms:
                    srid = parms
            return "geometry::STGeomFromText('%s',%s)" %(obj, srid)
        elif field_is_type('geography'):
            srid = 4326 # MS SQL default srid for geography
            if '(' in fieldtype:
                parms = fieldtype[:-1].split('(')[1]
                if parms:
                    srid = parms
            return "geography::STGeomFromText('%s',%s)" %(obj, srid)
        return BaseAdapter.represent(self, obj, fieldtype)
3239
class MSSQL3Adapter(MSSQLAdapter):
    """ experimental support for pagination in MSSQL"""

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Server-side paging.

        Offset 0 uses plain TOP n; a non-zero offset wraps the query in
        ROW_NUMBER() OVER (ORDER BY ...) and filters on the synthetic w_row
        column.  NOTE(review): the lmin>0 path assumes sql_o contains
        'ORDER BY ' -- the slicing below misbehaves otherwise; confirm callers.
        """
        if limitby:
            (lmin, lmax) = limitby
            if lmin == 0:
                sql_s += ' TOP %i' % lmax
                return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
            lmin += 1  # ROW_NUMBER() is 1-based and BETWEEN is inclusive
            sql_o_inner = sql_o[sql_o.find('ORDER BY ')+9:]
            sql_g_inner = sql_o[:sql_o.find('ORDER BY ')]
            # alias every selected column f_0, f_1, ... so the outer query can
            # reference them regardless of expressions in the inner select
            sql_f_outer = ['f_%s' % f for f in range(len(sql_f.split(',')))]
            sql_f_inner = [f for f in sql_f.split(',')]
            sql_f_iproxy = ['%s AS %s' % (o, n) for (o, n) in zip(sql_f_inner, sql_f_outer)]
            sql_f_iproxy = ', '.join(sql_f_iproxy)
            sql_f_oproxy = ', '.join(sql_f_outer)
            return 'SELECT %s %s FROM (SELECT %s ROW_NUMBER() OVER (ORDER BY %s) AS w_row, %s FROM %s%s%s) TMP WHERE w_row BETWEEN %i AND %s;' % (sql_s,sql_f_oproxy,sql_s,sql_f,sql_f_iproxy,sql_t,sql_w,sql_g_inner,lmin,lmax)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s,sql_f,sql_t,sql_w,sql_o)

    def rowslice(self,rows,minimum=0,maximum=None):
        # paging already happened in SQL, so never slice client side
        return rows
3261
class MSSQL2Adapter(MSSQLAdapter):
    """MSSQL adapter that stores text in national (Unicode) column types."""
    drivers = ('pyodbc',)

    # same as MSSQLAdapter.types but with NVARCHAR/NTEXT for textual fields
    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NTEXT',
        'json': 'NTEXT',
        'password': 'NVARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'NVARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'NTEXT',
        'list:string': 'NTEXT',
        'list:reference': 'NTEXT',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def represent(self, obj, fieldtype):
        """Prefix quoted textual literals with N so they stay Unicode."""
        literal = BaseAdapter.represent(self, obj, fieldtype)
        if fieldtype in ('string', 'text', 'json') and literal.startswith("'"):
            literal = 'N' + literal
        return literal

    def execute(self, a):
        """Decode the SQL command to unicode before handing it to the driver."""
        return self.log_execute(a.decode('utf8'))
3301
class SybaseAdapter(MSSQLAdapter):
    """Adapter for Sybase via the Sybase driver.

    URI forms:
        sybase://user:password@host[:port]/db
        sybase://dsn           (no '@': the rest is treated as a raw DSN)
    """
    drivers = ('Sybase',)

    # web2py field type -> Sybase column type
    types = {
        'boolean': 'BIT',
        'string': 'CHAR VARYING(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'CHAR VARYING(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'CHAR VARYING(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'geometry',
        'geography': 'geography',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        """Parse *uri* into a Sybase DSN and (optionally) connect.

        Raises SyntaxError when the URI lacks a user, host or database name.
        """
        self.db = db
        self.dbengine = "sybase"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.srid = srid
        self.find_or_make_work_folder()
        # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
        ruri = uri.split('://',1)[1]
        if '@' not in ruri:
            # no credentials: the rest of the URI is a raw DSN string
            try:
                m = self.REGEX_DSN.match(ruri)
                if not m:
                    raise SyntaxError(
                        'Parsing uri string(%s) has no result' % self.uri)
                dsn = m.group('dsn')
                if not dsn:
                    raise SyntaxError('DSN required')
            except SyntaxError:
                e = sys.exc_info()[1]
                LOGGER.error('NdGpatch error')
                raise e
        else:
            # BUGFIX: match against ruri (scheme stripped), not the full uri --
            # matching the full uri made 'sybase' parse as the user name
            m = self.REGEX_URI.match(ruri)
            if not m:
                raise SyntaxError(
                    "Invalid URI string in DAL: %s" % self.uri)
            user = credential_decoder(m.group('user'))
            if not user:
                raise SyntaxError('User required')
            password = credential_decoder(m.group('password'))
            if not password:
                password = ''
            host = m.group('host')
            if not host:
                raise SyntaxError('Host name required')
            db = m.group('db')
            if not db:
                raise SyntaxError('Database name required')
            port = m.group('port') or '1433'

            dsn = 'sybase:host=%s:%s;dbname=%s' % (host,port,db)

            driver_args.update(user = credential_decoder(user),
                               password = credential_decoder(password))

        def connector(dsn=dsn,driver_args=driver_args):
            return self.driver.connect(dsn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def integrity_error_class(self):
        return RuntimeError # FIX THIS
3396
class FireBirdAdapter(BaseAdapter):
    """Adapter for Firebird databases.

    URI: firebird://user:password@host[:port]/db[?set_encoding=charset]

    Firebird has no autoincrement columns: each table gets a generator
    (sequence) plus a BEFORE INSERT trigger.
    """
    drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc')

    commit_on_alter_table = False
    support_distributed_transaction = True
    # web2py field type -> Firebird column type
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'json': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        'big-id': 'BIGINT PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def sequence_name(self,tablename):
        # the generator that supplies id values for this table
        return 'genid_%s' % tablename

    def trigger_name(self,tablename):
        return 'trg_id_%s' % tablename

    def RANDOM(self):
        return 'RAND()'

    def EPOCH(self, first):
        return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)

    def NOT_NULL(self,default,field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s from %s for %s)' % (self.expand(field), parameters[0], parameters[1])

    def CONTAINING(self,first,second):
        "case in-sensitive like operator"
        return '(%s CONTAINING %s)' % (self.expand(first),
                                       self.expand(second, 'string'))

    def CONTAINS(self, first, second, case_sensitive=False):
        """Escape wildcards; list:* values are stored as |a|b|c| strings."""
        if first.type in ('string','text'):
            second = str(second).replace('%','%%')
        elif first.type.startswith('list:'):
            second = '|'+str(second).replace('|','||').replace('%','%%')+'|'
        return self.CONTAINING(first,second)

    def _drop(self,table,mode):
        """Drop the table and its id generator."""
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table, mode), 'DROP GENERATOR %s;' % sequence_name]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Firebird paging: FIRST <count> SKIP <offset> right after SELECT
        if limitby:
            (lmin, lmax) = limitby
            sql_s = ' FIRST %i SKIP %i %s' % (lmax - lmin, lmin, sql_s)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def _truncate(self,table,mode = ''):
        # no TRUNCATE in Firebird: delete all rows and reset the generator
        return ['DELETE FROM %s;' % table._tablename,
                'SET GENERATOR %s TO 0;' % table._sequence_name]

    # user:password@host[:port]/db with an optional ?set_encoding=charset tail
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+?)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse *uri* into driver keyword arguments and (optionally) connect.

        Raises SyntaxError when the URI lacks a user, host or database name.
        """
        self.db = db
        self.dbengine = "firebird"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        port = int(m.group('port') or 3050)
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        charset = m.group('charset') or 'UTF8'
        driver_args.update(dsn='%s/%s:%s' % (host,port,db),
                           user = credential_decoder(user),
                           password = credential_decoder(password),
                           charset = charset)

        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def create_sequence_and_triggers(self, query, table, **args):
        """Create the table, its generator, and the BEFORE INSERT id trigger."""
        tablename = table._tablename
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('create generator %s;' % sequence_name)
        self.execute('set generator %s to 0;' % sequence_name)
        self.execute('create trigger %s for %s active before insert position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, sequence_name))

    def lastrowid(self,table):
        """Return the current value of the table's id generator."""
        sequence_name = table._sequence_name
        self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name)
        return int(self.cursor.fetchone()[0])
3527
class FireBirdEmbeddedAdapter(FireBirdAdapter):
    """Firebird embedded (file-based) variant: the URI carries a file path
    instead of host/db.

    URI: firebird_embedded://user:password@/path/to/db[?set_encoding=charset]
    """
    drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc')

    # user:password@path with an optional ?set_encoding=charset tail
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<path>[^\?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse *uri* into driver keyword arguments and (optionally) connect.

        Raises SyntaxError when the URI lacks a user or database path.
        """
        self.db = db
        self.dbengine = "firebird"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        pathdb = m.group('path')
        if not pathdb:
            raise SyntaxError('Path required')
        charset = m.group('charset')
        if not charset:
            charset = 'UTF8'
        # embedded mode: empty host selects the local engine
        host = ''
        driver_args.update(host=host,
                           database=pathdb,
                           user=credential_decoder(user),
                           password=credential_decoder(password),
                           charset=charset)

        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()
3574
class InformixAdapter(BaseAdapter):
    """Adapter for Informix (9+) via informixdb.

    URI: informix://user:password@host[:port]/db
    """
    drivers = ('informixdb',)

    # web2py field type -> Informix column type
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'json': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'SERIAL',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        'big-id': 'BIGSERIAL',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s',
        'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s',
        }

    def RANDOM(self):
        return 'Random()'

    def NOT_NULL(self,default,field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Paging via SKIP/FIRST, gated on the connected server's version."""
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            dbms_version = int(self.connection.dbms_version.split('.')[0])
            if lmin and (dbms_version >= 10):
                # Requires Informix 10.0+
                sql_s += ' SKIP %d' % (lmin, )
            if fetch_amt and (dbms_version >= 9):
                # Requires Informix 9.0+
                sql_s += ' FIRST %d' % (fetch_amt, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        """Informix-specific date literals; return None to use the generic path."""
        if fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','%%Y-%%m-%%d')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','%%Y-%%m-%%d %%H:%%M:%%S')" % obj
        return None

    # user:password@host[:port]/db -- password and port are optional
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse *uri* into a 'db@host' DSN and (optionally) connect.

        Raises SyntaxError when the URI lacks a user, host or database name.
        """
        self.db = db
        self.dbengine = "informix"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        # NOTE(review): credential_decoder is applied a second time here
        # (user/password were already decoded above) -- confirm intended
        user = credential_decoder(user)
        password = credential_decoder(password)
        dsn = '%s@%s' % (db,host)
        driver_args.update(user=user,password=password,autocommit=True)
        def connector(dsn=dsn,driver_args=driver_args):
            return self.driver.connect(dsn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def execute(self,command):
        # informixdb rejects a trailing semicolon
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self,table):
        # sqlerrd[1] holds the SERIAL value of the last insert
        return self.cursor.sqlerrd[1]

    def integrity_error_class(self):
        return informixdb.IntegrityError
3691
class InformixSEAdapter(InformixAdapter):
    """ work in progress """

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # SE has no SKIP/FIRST: fetch everything and page in rowslice()
        return 'SELECT %s %s FROM %s%s%s;' % \
            (sql_s, sql_f, sql_t, sql_w, sql_o)

    def rowslice(self, rows, minimum=0, maximum=None):
        """Client-side paging of the fetched rows."""
        return rows[minimum:] if maximum is None else rows[minimum:maximum]
3703
class DB2Adapter(BaseAdapter):
    """DAL adapter for IBM DB2, connecting through pyodbc."""

    drivers = ('pyodbc',)

    # Map of DAL field types to DB2 column-type DDL fragments.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'REAL',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def LEFT_JOIN(self):
        """DB2 spells LEFT JOIN as LEFT OUTER JOIN."""
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        """SQL expression for random ordering."""
        return 'RAND()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Assemble a SELECT, using FETCH FIRST n ROWS ONLY for limits.

        NOTE: only the upper bound (lmax) is applied; the lmin offset is
        ignored here — row offsets are presumably handled by rowslice().
        """
        if limitby:
            (lmin, lmax) = limitby
            sql_o += ' FETCH FIRST %i ROWS ONLY' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        """Special-case SQL literal rendering for blob and datetime values.

        Returns a SQL literal string for the handled types, or None to let
        the generic representation logic take over.
        """
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
            return "BLOB('%s')" % obj
        elif fieldtype == 'datetime':
            # DB2 timestamp literal format: yyyy-mm-dd-hh.mm.ss
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T','-').replace(':','.')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+'-00.00.00'
            return "'%s'" % obj
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Connect to DB2; everything after 'db2://' is passed to pyodbc
        verbatim as an ODBC connection string."""
        self.db = db
        self.dbengine = "db2"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://', 1)[1]
        def connector(cnxn=ruri,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def execute(self,command):
        """Execute *command*, dropping one trailing ';' the driver rejects."""
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self,table):
        """Return the identity value generated by the last INSERT on *table*."""
        self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table)
        return int(self.cursor.fetchone()[0])

    def rowslice(self,rows,minimum=0,maximum=None):
        """Client-side row offsetting (lmin is not applied in SQL above)."""
        if maximum is None:
            return rows[minimum:]
        return rows[minimum:maximum]
3789
class TeradataAdapter(BaseAdapter):
    """DAL adapter for Teradata, connecting through pyodbc."""

    drivers = ('pyodbc',)

    # Map of DAL field types to Teradata column-type DDL fragments.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'REAL',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        # Modified Constraint syntax for Teradata.
        # Teradata does not support ON DELETE.
        'id': 'INT GENERATED ALWAYS AS IDENTITY',  # Teradata Specific
        'reference': 'INT',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY',  # Teradata Specific
        'big-reference': 'BIGINT',
        'reference FK': ' REFERENCES %(foreign_key)s',
        'reference TFK': ' FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s)',
        }

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Connect to Teradata; everything after 'teradata://' is handed
        to pyodbc verbatim as an ODBC connection string."""
        self.db = db
        self.dbengine = "teradata"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://', 1)[1]
        def connector(cnxn=ruri,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def LEFT_JOIN(self):
        """Teradata spells LEFT JOIN as LEFT OUTER JOIN."""
        return 'LEFT OUTER JOIN'

    # Similar to MSSQL, Teradata can't specify a range (for Pageby)
    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Assemble a SELECT; limits use TOP n (lmin offset is ignored)."""
        if limitby:
            (lmin, lmax) = limitby
            sql_s += ' TOP %i' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def _truncate(self, table, mode=''):
        """Teradata has no TRUNCATE; emulate with DELETE ... ALL."""
        tablename = table._tablename
        return ['DELETE FROM %s ALL;' % (tablename)]
# Name of the shared sequence placeholder used in 'id' column DDL below;
# replaced per-table in create_sequence_and_triggers().
INGRES_SEQNAME='ii***lineitemsequence' # NOTE invalid database object name
                                       # (ANSI-SQL wants this form of name
                                       # to be a delimited identifier)

class IngresAdapter(BaseAdapter):
    """DAL adapter for Ingres, connecting through pyodbc."""

    drivers = ('pyodbc',)

    # Map of DAL field types to Ingres column-type DDL fragments.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',  ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',  ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'int not null unique with default next value for %s' % INGRES_SEQNAME,
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'bigint not null unique with default next value for %s' % INGRES_SEQNAME,
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        }

    def LEFT_JOIN(self):
        """Ingres spells LEFT JOIN as LEFT OUTER JOIN."""
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        """SQL expression for random ordering."""
        return 'RANDOM()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Assemble a SELECT using FIRST n / OFFSET m pagination."""
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            if fetch_amt:
                sql_s += ' FIRST %d ' % (fetch_amt, )
            if lmin:
                # Requires Ingres 9.2+
                sql_o += ' OFFSET %d' % (lmin, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Connect to Ingres.

        The URI remainder is either a full ODBC connection string (detected
        by the presence of '=') or a bare local database name, in which case
        a default local-OS-auth connection string is built.
        """
        self.db = db
        self.dbengine = "ingres"
        # NOTE(review): binds the module-level pyodbc directly instead of
        # going through find_driver like other adapters — confirm intended
        self._driver = pyodbc
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        connstr = uri.split(':', 1)[1]
        # Simple URI processing
        connstr = connstr.lstrip()
        while connstr.startswith('/'):
            connstr = connstr[1:]
        if '=' in connstr:
            # Assume we have a regular ODBC connection string and just use it
            ruri = connstr
        else:
            # Assume only (local) dbname is passed in with OS auth
            database_name = connstr
            default_driver_name = 'Ingres'
            vnode = '(local)'
            servertype = 'ingres'
            ruri = 'Driver={%s};Server=%s;Database=%s' % (default_driver_name, vnode, database_name)
        def connector(cnxn=ruri,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)

        self.connector = connector

        # TODO if version is >= 10, set types['id'] to Identity column, see http://community.actian.com/wiki/Using_Ingres_Identity_Columns
        if do_connect: self.reconnect()

    def create_sequence_and_triggers(self, query, table, **args):
        # post create table auto inc code (if needed)
        # modify table to btree for performance....
        # Older Ingres releases could use rule/trigger like Oracle above.
        # NOTE(review): the keyed-table branch reads table.primarykey while
        # the guard tests for the '_primarykey' attribute — verify these
        # refer to the same thing in Table, otherwise this branch raises.
        if hasattr(table,'_primarykey'):
            modify_tbl_sql = 'modify %s to btree unique on %s' % \
                (table._tablename,
                 ', '.join(["'%s'" % x for x in table.primarykey]))
            self.execute(modify_tbl_sql)
        else:
            # id-based table: materialize the per-table sequence that the
            # INGRES_SEQNAME placeholder in the DDL stands for
            tmp_seqname='%s_iisq' % table._tablename
            query=query.replace(INGRES_SEQNAME, tmp_seqname)
            self.execute('create sequence %s' % tmp_seqname)
            self.execute(query)
            self.execute('modify %s to btree unique on %s' % (table._tablename, 'id'))

    def lastrowid(self,table):
        """Return the current value of the table's id sequence."""
        tmp_seqname='%s_iisq' % table
        self.execute('select current value for %s' % tmp_seqname)
        return int(self.cursor.fetchone()[0]) # don't really need int type cast here...

    def integrity_error_class(self):
        """Return the driver's IntegrityError exception class."""
        return self._driver.IntegrityError
class IngresUnicodeAdapter(IngresAdapter):
    """Ingres adapter storing text in Unicode column types (NVARCHAR/NCLOB).

    Behaviour is inherited from IngresAdapter; only the DAL-type to
    column-type map differs.
    """

    drivers = ('pyodbc',)

    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NCLOB',
        'json': 'NCLOB',
        'password': 'NVARCHAR(%(length)s)',  ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',  ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'INTEGER4 not null unique with default next value for %s'% INGRES_SEQNAME,
        'reference': 'INTEGER4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'NCLOB',
        'list:string': 'NCLOB',
        'list:reference': 'NCLOB',
        'big-id': 'BIGINT not null unique with default next value for %s'% INGRES_SEQNAME,
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        }
3998
class SAPDBAdapter(BaseAdapter):
    """DAL adapter for SAP DB / MaxDB (experimental)."""

    drivers = ('sapdb',)

    support_distributed_transaction = False
    # Map of DAL field types to SAP DB column-type DDL fragments.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONG',
        'json': 'LONG',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONG',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'FIXED(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INT PRIMARY KEY',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONG',
        'list:string': 'LONG',
        'list:reference': 'LONG',
        'big-id': 'BIGINT PRIMARY KEY',
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def sequence_name(self,table):
        """Name of the sequence feeding a table's id column."""
        return '%s_id_Seq' % table

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Assemble a SELECT, emulating limit/offset with a ROWNO subquery.

        NOTE(review): the limitby branch interpolates ten values into a
        nested-subquery template and does not start with the SELECT keyword
        (sql_s holds only modifiers elsewhere in this file) — this looks
        fragile; confirm against a live SAP DB before relying on paging.
        """
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            return '%s %s FROM (SELECT w_tmp.*, ROWNO w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNO=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def create_sequence_and_triggers(self, query, table, **args):
        """Create the id sequence, wire it as the column default, then run
        the CREATE TABLE statement in *query*."""
        # following lines should only be executed if table._sequence_name does not exist
        self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
                     % (table._tablename, table._id.name, table._sequence_name))
        self.execute(query)

    # user:password@host[:port]/dbname[?sslmode=...]
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Connect to SAP DB; raises SyntaxError on a malformed URI or
        missing user/host/db parts."""
        self.db = db
        self.dbengine = "sapdb"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        def connector(user=user, password=password, database=db,
                      host=host, driver_args=driver_args):
            return self.driver.Connection(user, password, database,
                                          host, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def lastrowid(self,table):
        """Return the next value of the table's sequence.

        NOTE(review): NEXTVAL increments the sequence, so this returns the
        id only if called immediately after the INSERT consumed the
        previous value — confirm against driver behaviour.
        """
        self.execute("select %s.NEXTVAL from dual" % table._sequence_name)
        return int(self.cursor.fetchone()[0])
4089
class CubridAdapter(MySQLAdapter):
    """DAL adapter for CUBRID (experimental); MySQL dialect via cubriddb."""

    drivers = ('cubriddb',)

    # user:password@host[:port]/dbname[?set_encoding=charset]
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """
        Connect to a CUBRID database.

        Credentials are passed through ``credential_decoder`` exactly once.
        Raises SyntaxError on a malformed URI or missing user/host/db parts.
        """
        self.db = db
        self.dbengine = "cubrid"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = int(m.group('port') or '30000')
        # parsed but currently unused by the driver call below; kept for
        # URI compatibility — TODO pass to the driver once supported
        charset = m.group('charset') or 'utf8'
        # (removed a second credential_decoder() pass over user/password —
        # it double-decoded credentials — and the dead 'passwd' local that
        # the connector never read)
        def connector(host=host,port=port,db=db,
                      user=user,passwd=password,driver_args=driver_args):
            return self.driver.connect(host,port,db,user,passwd,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        """Per-connection session setup (MySQL-compatible settings)."""
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
4137
######## GAE MySQL ##########

class DatabaseStoredFile:
    """File-like object that stores web2py metadata files (.table files)
    in a database table instead of the filesystem.

    Implements a minimal read/write file protocol (read, readline, write,
    close) backed by a single-row-per-path 'web2py_filesystem' table.
    Only MySQL and Postgres back-ends are supported.
    """

    # class-level flag: True once the web2py_filesystem table has been
    # ensured for this process (shared across all instances)
    web2py_filesystem = False

    def escape(self,obj):
        """Delegate SQL escaping to the owning adapter."""
        return self.db._adapter.escape(obj)

    def __init__(self,db,filename,mode):
        # NOTE(review): filenames are interpolated into SQL unescaped in
        # this class; presumably safe because they originate from web2py
        # internals, not user input — confirm before widening usage.
        if not db._adapter.dbengine in ('mysql', 'postgres'):
            raise RuntimeError("only MySQL/Postgres can store metadata .table files in database for now")
        self.db = db
        self.filename = filename
        self.mode = mode
        if not self.web2py_filesystem:
            if db._adapter.dbengine == 'mysql':
                sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content LONGTEXT, PRIMARY KEY(path) ) ENGINE=InnoDB;"
            elif db._adapter.dbengine == 'postgres':
                sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content TEXT, PRIMARY KEY(path));"
            self.db.executesql(sql)
            DatabaseStoredFile.web2py_filesystem = True
        self.p=0                # current read position in self.data
        self.data = ''          # full in-memory contents of the file
        if mode in ('r','rw','a'):
            query = "SELECT content FROM web2py_filesystem WHERE path='%s'" \
                % filename
            rows = self.db.executesql(query)
            if rows:
                self.data = rows[0][0]
            elif exists(filename):
                # fall back to the real filesystem copy if present
                datafile = open(filename, 'r')
                try:
                    self.data = datafile.read()
                finally:
                    datafile.close()
            elif mode in ('r','rw'):
                raise RuntimeError("File %s does not exist" % filename)

    def read(self, bytes):
        """Read up to *bytes* characters from the current position."""
        data = self.data[self.p:self.p+bytes]
        self.p += len(data)
        return data

    def readline(self):
        """Read one line (including the trailing newline, if any)."""
        i = self.data.find('\n',self.p)+1
        if i>0:
            data, self.p = self.data[self.p:i], i
        else:
            # no further newline: return the remainder
            data, self.p = self.data[self.p:], len(self.data)
        return data

    def write(self,data):
        """Append *data*; persisted only on close_connection()."""
        self.data += data

    def close_connection(self):
        """Flush contents to the database (delete + insert) and commit.

        Idempotent: self.db is cleared so a second call is a no-op.
        """
        if self.db is not None:
            self.db.executesql(
                "DELETE FROM web2py_filesystem WHERE path='%s'" % self.filename)
            query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')"\
                % (self.filename, self.data.replace("'","''"))
            self.db.executesql(query)
            self.db.commit()
            self.db = None

    def close(self):
        """Alias for close_connection() (file-protocol compatibility)."""
        self.close_connection()

    @staticmethod
    def exists(db, filename):
        """True if *filename* exists on disk or in web2py_filesystem."""
        if exists(filename):
            return True
        query = "SELECT path FROM web2py_filesystem WHERE path='%s'" % filename
        if db.executesql(query):
            return True
        return False
4215
class UseDatabaseStoredFile:
    """Mixin redirecting an adapter's metadata-file operations to the
    database-backed DatabaseStoredFile instead of the filesystem."""

    def file_exists(self, filename):
        """True if *filename* exists on disk or in web2py_filesystem."""
        return DatabaseStoredFile.exists(self.db, filename)

    def file_open(self, filename, mode='rb', lock=True):
        """Open *filename* as a DatabaseStoredFile (lock is ignored)."""
        stored = DatabaseStoredFile(self.db, filename, mode)
        return stored

    def file_close(self, fileobj):
        """Persist and release a DatabaseStoredFile."""
        fileobj.close_connection()

    def file_delete(self,filename):
        """Remove *filename*'s row from web2py_filesystem and commit."""
        sql = "DELETE FROM web2py_filesystem WHERE path='%s'" % filename
        self.db.executesql(sql)
        self.db.commit()
4232
class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter):
    """DAL adapter for Google Cloud SQL (MySQL dialect) on App Engine.

    Metadata .table files are stored in the database (UseDatabaseStoredFile)
    because the GAE filesystem is read-only.
    """

    uploads_in_blob = True

    # instance_path/database_name
    REGEX_URI = re.compile('^(?P<instance>.*)/(?P<db>.*)$')

    def __init__(self, db, uri='google:sql://realm:domain/database',
                 pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Connect via the GAE rdbms module.

        adapter_args['createdb'] (default True) controls whether the
        database is created/selected in after_connection().
        """
        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        self.pool_size = pool_size
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.folder = folder or pjoin('$HOME',THREAD_LOCAL.folder.split(
                os.sep+'applications'+os.sep,1)[1])
        ruri = uri.split("://")[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in SQLDB: %s" % self.uri)
        instance = credential_decoder(m.group('instance'))
        self.dbstring = db = credential_decoder(m.group('db'))
        # copy before mutating: driver_args defaults to a shared mutable
        # dict, and the original in-place writes ('instance', 'charset',
        # 'database') leaked one adapter's settings into every later one
        driver_args = dict(driver_args)
        driver_args['instance'] = instance
        if 'charset' not in driver_args:
            driver_args['charset'] = 'utf8'
        self.createdb = createdb = adapter_args.get('createdb',True)
        if not createdb:
            driver_args['database'] = db
        def connector(driver_args=driver_args):
            return rdbms.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        """Per-connection setup: optionally create/select the database,
        then apply MySQL session settings."""
        if self.createdb:
            # self.execute('DROP DATABASE %s' % self.dbstring)
            self.execute('CREATE DATABASE IF NOT EXISTS %s' % self.dbstring)
            self.execute('USE %s' % self.dbstring)
        self.execute("SET FOREIGN_KEY_CHECKS=1;")
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")

    def execute(self, command, *a, **b):
        """Execute *command*, decoding byte strings to unicode first
        (the rdbms driver expects unicode)."""
        return self.log_execute(command.decode('utf8'), *a, **b)
4278
class NoSQLAdapter(BaseAdapter):
    """Common base for non-relational back-ends (GAE datastore, CouchDB,
    MongoDB, ...): value representation helpers, no-op transactions, and
    stubs that raise for SQL-only operations."""

    can_select_for_update = False

    @staticmethod
    def to_unicode(obj):
        """Coerce *obj* to unicode (Python 2 semantics: decode str as UTF-8)."""
        if isinstance(obj, str):
            return obj.decode('utf8')
        elif not isinstance(obj, unicode):
            return unicode(obj)
        return obj

    def id_query(self, table):
        """Query matching every record of *table* (id is always positive)."""
        return table._id > 0

    def represent(self, obj, fieldtype):
        """Convert a Python value *obj* into the native value the back-end
        stores for a field of type *fieldtype*.

        fieldtype may be a DAL type string, an SQLCustomType, or (on GAE)
        a gae.Property instance.  Returns None for empty-string values of
        non-textual fields.
        """
        field_is_type = fieldtype.startswith
        if isinstance(obj, CALLABLETYPES):
            obj = obj()
        if isinstance(fieldtype, SQLCustomType):
            return fieldtype.encoder(obj)
        if isinstance(obj, (Expression, Field)):
            raise SyntaxError("non supported on GAE")
        if self.dbengine == 'google:datastore':
            # native GAE property values pass through untouched
            if isinstance(fieldtype, gae.Property):
                return obj
        is_string = isinstance(fieldtype,str)
        is_list = is_string and field_is_type('list:')
        if is_list:
            # list fields: normalize scalars/None into a list
            if not obj:
                obj = []
            if not isinstance(obj, (list, tuple)):
                obj = [obj]
        if obj == '' and not \
                (is_string and fieldtype[:2] in ['st','te', 'pa','up']):
            # empty string only meaningful for string/text/password/upload
            return None
        if not obj is None:
            if isinstance(obj, list) and not is_list:
                # scalar field given a list: represent each element
                obj = [self.represent(o, fieldtype) for o in obj]
            elif fieldtype in ('integer','bigint','id'):
                obj = long(obj)
            elif fieldtype == 'double':
                obj = float(obj)
            elif is_string and field_is_type('reference'):
                # references are stored as the referenced record's integer id
                if isinstance(obj, (Row, Reference)):
                    obj = obj['id']
                obj = long(obj)
            elif fieldtype == 'boolean':
                # anything not starting with '0' or 'F'/'f' is True
                if obj and not str(obj)[0].upper() in '0F':
                    obj = True
                else:
                    obj = False
            elif fieldtype == 'date':
                if not isinstance(obj, datetime.date):
                    # parse 'YYYY-MM-DD'
                    (y, m, d) = map(int,str(obj).strip().split('-'))
                    obj = datetime.date(y, m, d)
                elif isinstance(obj,datetime.datetime):
                    # truncate datetime to date
                    (y, m, d) = (obj.year, obj.month, obj.day)
                    obj = datetime.date(y, m, d)
            elif fieldtype == 'time':
                if not isinstance(obj, datetime.time):
                    # parse 'HH:MM[:SS]'; missing seconds default to 0
                    time_items = map(int,str(obj).strip().split(':')[:3])
                    if len(time_items) == 3:
                        (h, mi, s) = time_items
                    else:
                        (h, mi, s) = time_items + [0]
                    obj = datetime.time(h, mi, s)
            elif fieldtype == 'datetime':
                if not isinstance(obj, datetime.datetime):
                    # parse 'YYYY-MM-DD HH:MM:SS' (missing parts default 0)
                    (y, m, d) = map(int,str(obj)[:10].strip().split('-'))
                    time_items = map(int,str(obj)[11:].strip().split(':')[:3])
                    while len(time_items)<3:
                        time_items.append(0)
                    (h, mi, s) = time_items
                    obj = datetime.datetime(y, m, d, h, mi, s)
            elif fieldtype == 'blob':
                pass
            elif fieldtype == 'json':
                obj = self.to_unicode(obj)
                if have_serializers:
                    obj = serializers.loads_json(obj)
                elif simplejson:
                    obj = simplejson.loads(obj)
                else:
                    raise RuntimeError("missing simplejson")
            elif is_string and field_is_type('list:string'):
                return map(self.to_unicode,obj)
            elif is_list:
                return map(int,obj)
            else:
                obj = self.to_unicode(obj)
        return obj

    # The _insert/_count/_select/_delete/_update methods below only build
    # human-readable descriptions (there is no SQL to generate for NoSQL
    # back-ends); they exist so db()._select() etc. return something useful.
    def _insert(self,table,fields):
        return 'insert %s in %s' % (fields, table)

    def _count(self,query,distinct=None):
        return 'count %s' % repr(query)

    def _select(self,query,fields,attributes):
        return 'select %s where %s' % (repr(fields), repr(query))

    def _delete(self,tablename, query):
        return 'delete %s where %s' % (repr(tablename),repr(query))

    def _update(self,tablename,query,fields):
        return 'update %s (%s) where %s' % (repr(tablename),
                                            repr(fields),repr(query))

    def commit(self):
        """
        remember: no transactions on many NoSQL
        """
        pass

    def rollback(self):
        """
        remember: no transactions on many NoSQL
        """
        pass

    def close_connection(self):
        """
        remember: no transactions on many NoSQL
        """
        pass

    # these functions should never be called!
    def OR(self,first,second): raise SyntaxError("Not supported")
    def AND(self,first,second): raise SyntaxError("Not supported")
    def AS(self,first,second): raise SyntaxError("Not supported")
    def ON(self,first,second): raise SyntaxError("Not supported")
    def STARTSWITH(self,first,second=None): raise SyntaxError("Not supported")
    def ENDSWITH(self,first,second=None): raise SyntaxError("Not supported")
    def ADD(self,first,second): raise SyntaxError("Not supported")
    def SUB(self,first,second): raise SyntaxError("Not supported")
    def MUL(self,first,second): raise SyntaxError("Not supported")
    def DIV(self,first,second): raise SyntaxError("Not supported")
    def LOWER(self,first): raise SyntaxError("Not supported")
    def UPPER(self,first): raise SyntaxError("Not supported")
    def EXTRACT(self,first,what): raise SyntaxError("Not supported")
    def AGGREGATE(self,first,what): raise SyntaxError("Not supported")
    def LEFT_JOIN(self): raise SyntaxError("Not supported")
    def RANDOM(self): raise SyntaxError("Not supported")
    def SUBSTRING(self,field,parameters): raise SyntaxError("Not supported")
    def PRIMARY_KEY(self,key): raise SyntaxError("Not supported")
    def ILIKE(self,first,second): raise SyntaxError("Not supported")
    def drop(self,table,mode): raise SyntaxError("Not supported")
    def alias(self,table,alias): raise SyntaxError("Not supported")
    def migrate_table(self,*a,**b): raise SyntaxError("Not supported")
    def distributed_transaction_begin(self,key): raise SyntaxError("Not supported")
    def prepare(self,key): raise SyntaxError("Not supported")
    def commit_prepared(self,key): raise SyntaxError("Not supported")
    def rollback_prepared(self,key): raise SyntaxError("Not supported")
    def concat_add(self,table): raise SyntaxError("Not supported")
    def constraint_name(self, table, fieldname): raise SyntaxError("Not supported")
    def create_sequence_and_triggers(self, query, table, **args): pass
    def log_execute(self,*a,**b): raise SyntaxError("Not supported")
    def execute(self,*a,**b): raise SyntaxError("Not supported")
    def represent_exceptions(self, obj, fieldtype): raise SyntaxError("Not supported")
    def lastrowid(self,table): raise SyntaxError("Not supported")
    def integrity_error_class(self): raise SyntaxError("Not supported")
    def rowslice(self,rows,minimum=0,maximum=None): raise SyntaxError("Not supported")
4442
class GAEF(object):
    """One Google Datastore filter condition: a (name, op, value) triple
    plus a Python callable mirroring the comparison for in-memory use."""

    def __init__(self, name, op, value, apply):
        # 'id' is addressed through the datastore's key pseudo-property
        self.name = '__key__' if name == 'id' else name
        self.op = op
        self.value = value
        self.apply = apply

    def __repr__(self):
        return '(%s %s %s:%s)' % (self.name, self.op, repr(self.value), type(self.value))
4452
4453 -class GoogleDatastoreAdapter(NoSQLAdapter):
4454 uploads_in_blob = True 4455 types = {} 4456
    # The datastore has no filesystem for migration metadata: the three
    # file_* hooks are deliberate no-ops (they always return None).
    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj): pass

    # everything after '://' in the URI is treated as a datastore namespace
    REGEX_NAMESPACE = re.compile('.*://(?P<namespace>.+)')
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Set up the GAE datastore adapter.

        Populates the DAL-type -> gae.Property map, records bookkeeping
        attributes (no real connection/pool is needed on GAE), and, if the
        URI carries a namespace suffix, activates it via namespace_manager.
        Note: self.types is a class-level dict updated in place.
        """
        self.types.update({
                'boolean': gae.BooleanProperty,
                'string': (lambda **kwargs: gae.StringProperty(multiline=True, **kwargs)),
                'text': gae.TextProperty,
                'json': gae.TextProperty,
                'password': gae.StringProperty,
                'blob': gae.BlobProperty,
                'upload': gae.StringProperty,
                'integer': gae.IntegerProperty,
                'bigint': gae.IntegerProperty,
                'float': gae.FloatProperty,
                'double': gae.FloatProperty,
                'decimal': GAEDecimalProperty,
                'date': gae.DateProperty,
                'time': gae.TimeProperty,
                'datetime': gae.DateTimeProperty,
                'id': None,
                'reference': gae.IntegerProperty,
                'list:string': (lambda **kwargs: gae.StringListProperty(default=None, **kwargs)),
                'list:integer': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)),
                'list:reference': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)),
                })
        self.db = db
        self.uri = uri
        self.dbengine = 'google:datastore'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self._after_connection = after_connection
        # no connection pooling on the datastore
        self.pool_size = 0
        match = self.REGEX_NAMESPACE.match(uri)
        if match:
            namespace_manager.set_namespace(match.group('namespace'))
4499
    def parse_id(self, value, field_type):
        """Datastore ids are already native integers; return unchanged
        (overrides the base adapter's id parsing)."""
        return value
4502
    def create_table(self,table,migrate=True,fake_migrate=False, polymodel=None):
        """Build the gae.Model subclass backing *table*.

        Translates each DAL field into a gae.Property via self.types and
        attaches the generated class as table._tableobj.  polymodel may be
        None (plain Model), True (PolyModel root), or a Table whose
        _tableobj becomes the parent class.  migrate/fake_migrate are
        accepted for interface compatibility and unused here.
        """
        myfields = {}
        for field in table:
            # fields inherited from a polymodel parent are defined there
            if isinstance(polymodel,Table) and field.name in polymodel.fields():
                continue
            attr = {}
            if isinstance(field.custom_qualifier, dict):
                #this is custom properties to add to the GAE field declartion
                attr = field.custom_qualifier
            field_type = field.type
            if isinstance(field_type, SQLCustomType):
                ftype = self.types[field_type.native or field_type.type](**attr)
            elif isinstance(field_type, gae.Property):
                ftype = field_type
            elif field_type.startswith('id'):
                # the datastore key serves as the id; no property needed
                continue
            elif field_type.startswith('decimal'):
                # 'decimal(p,s)' -> custom GAEDecimalProperty
                precision, scale = field_type[7:].strip('()').split(',')
                precision = int(precision)
                scale = int(scale)
                ftype = GAEDecimalProperty(precision, scale, **attr)
            elif field_type.startswith('reference'):
                # NOTE(review): this replaces (not updates) attr, dropping
                # any custom_qualifier entries — confirm intended
                if field.notnull:
                    attr = dict(required=True)
                referenced = field_type[10:].strip()
                ftype = self.types[field_type[:9]](referenced, **attr)
            elif field_type.startswith('list:reference'):
                if field.notnull:
                    attr['required'] = True
                referenced = field_type[15:].strip()
                ftype = self.types[field_type[:14]](**attr)
            elif field_type.startswith('list:'):
                ftype = self.types[field_type](**attr)
            elif not field_type in self.types\
                 or not self.types[field_type]:
                raise SyntaxError('Field: unknown field type: %s' % field_type)
            else:
                ftype = self.types[field_type](**attr)
            myfields[field.name] = ftype
        if not polymodel:
            table._tableobj = classobj(table._tablename, (gae.Model, ), myfields)
        elif polymodel==True:
            table._tableobj = classobj(table._tablename, (PolyModel, ), myfields)
        elif isinstance(polymodel,Table):
            table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields)
        else:
            raise SyntaxError("polymodel must be None, True, a table or a tablename")
        return None
4551
4552 - def expand(self,expression,field_type=None):
4553 if isinstance(expression,Field): 4554 if expression.type in ('text', 'blob', 'json'): 4555 raise SyntaxError('AppEngine does not index by: %s' % expression.type) 4556 return expression.name 4557 elif isinstance(expression, (Expression, Query)): 4558 if not expression.second is None: 4559 return expression.op(expression.first, expression.second) 4560 elif not expression.first is None: 4561 return expression.op(expression.first) 4562 else: 4563 return expression.op() 4564 elif field_type: 4565 return self.represent(expression,field_type) 4566 elif isinstance(expression,(list,tuple)): 4567 return ','.join([self.represent(item,field_type) for item in expression]) 4568 else: 4569 return str(expression)
4570 4571 ### TODO from gql.py Expression
4572 - def AND(self,first,second):
4573 a = self.expand(first) 4574 b = self.expand(second) 4575 if b[0].name=='__key__' and a[0].name!='__key__': 4576 return b+a 4577 return a+b
4578
4579 - def EQ(self,first,second=None):
4580 if isinstance(second, Key): 4581 return [GAEF(first.name,'=',second,lambda a,b:a==b)] 4582 return [GAEF(first.name,'=',self.represent(second,first.type),lambda a,b:a==b)]
4583
4584 - def NE(self,first,second=None):
4585 if first.type != 'id': 4586 return [GAEF(first.name,'!=',self.represent(second,first.type),lambda a,b:a!=b)] 4587 else: 4588 if not second is None: 4589 second = Key.from_path(first._tablename, long(second)) 4590 return [GAEF(first.name,'!=',second,lambda a,b:a!=b)]
4591
4592 - def LT(self,first,second=None):
4593 if first.type != 'id': 4594 return [GAEF(first.name,'<',self.represent(second,first.type),lambda a,b:a<b)] 4595 else: 4596 second = Key.from_path(first._tablename, long(second)) 4597 return [GAEF(first.name,'<',second,lambda a,b:a<b)]
4598
4599 - def LE(self,first,second=None):
4600 if first.type != 'id': 4601 return [GAEF(first.name,'<=',self.represent(second,first.type),lambda a,b:a<=b)] 4602 else: 4603 second = Key.from_path(first._tablename, long(second)) 4604 return [GAEF(first.name,'<=',second,lambda a,b:a<=b)]
4605
4606 - def GT(self,first,second=None):
4607 if first.type != 'id' or second==0 or second == '0': 4608 return [GAEF(first.name,'>',self.represent(second,first.type),lambda a,b:a>b)] 4609 else: 4610 second = Key.from_path(first._tablename, long(second)) 4611 return [GAEF(first.name,'>',second,lambda a,b:a>b)]
4612
4613 - def GE(self,first,second=None):
4614 if first.type != 'id': 4615 return [GAEF(first.name,'>=',self.represent(second,first.type),lambda a,b:a>=b)] 4616 else: 4617 second = Key.from_path(first._tablename, long(second)) 4618 return [GAEF(first.name,'>=',second,lambda a,b:a>=b)]
4619
4620 - def INVERT(self,first):
4621 return '-%s' % first.name
4622
4623 - def COMMA(self,first,second):
4624 return '%s, %s' % (self.expand(first),self.expand(second))
4625
4626 - def BELONGS(self,first,second=None):
4627 if not isinstance(second,(list, tuple)): 4628 raise SyntaxError("Not supported") 4629 if first.type != 'id': 4630 return [GAEF(first.name,'in',self.represent(second,first.type),lambda a,b:a in b)] 4631 else: 4632 second = [Key.from_path(first._tablename, int(i)) for i in second] 4633 return [GAEF(first.name,'in',second,lambda a,b:a in b)]
4634
4635 - def CONTAINS(self,first,second,case_sensitive=False):
4636 # silently ignoring: GAE can only do case sensitive matches! 4637 if not first.type.startswith('list:'): 4638 raise SyntaxError("Not supported") 4639 return [GAEF(first.name,'=',self.expand(second,first.type[5:]),lambda a,b:b in a)]
4640
4641 - def NOT(self,first):
4642 nops = { self.EQ: self.NE, 4643 self.NE: self.EQ, 4644 self.LT: self.GE, 4645 self.GT: self.LE, 4646 self.LE: self.GT, 4647 self.GE: self.LT} 4648 if not isinstance(first,Query): 4649 raise SyntaxError("Not suported") 4650 nop = nops.get(first.op,None) 4651 if not nop: 4652 raise SyntaxError("Not suported %s" % first.op.__name__) 4653 first.op = nop 4654 return self.expand(first)
4655
    def truncate(self,table,mode):
        # GAE has no TRUNCATE equivalent: emulate it by deleting every
        # record of the table.  The `mode` argument is ignored.
        self.db(table._id).delete()
4658
4659 - def select_raw(self,query,fields=None,attributes=None):
4660 db = self.db 4661 fields = fields or [] 4662 attributes = attributes or {} 4663 args_get = attributes.get 4664 new_fields = [] 4665 for item in fields: 4666 if isinstance(item,SQLALL): 4667 new_fields += item._table 4668 else: 4669 new_fields.append(item) 4670 fields = new_fields 4671 if query: 4672 tablename = self.get_table(query) 4673 elif fields: 4674 tablename = fields[0].tablename 4675 query = db._adapter.id_query(fields[0].table) 4676 else: 4677 raise SyntaxError("Unable to determine a tablename") 4678 4679 if query: 4680 if use_common_filters(query): 4681 query = self.common_filter(query,[tablename]) 4682 4683 #tableobj is a GAE Model class (or subclass) 4684 tableobj = db[tablename]._tableobj 4685 filters = self.expand(query) 4686 4687 projection = None 4688 if len(db[tablename].fields) == len(fields): 4689 #getting all fields, not a projection query 4690 projection = None 4691 elif args_get('projection') == True: 4692 projection = [] 4693 for f in fields: 4694 if f.type in ['text', 'blob', 'json']: 4695 raise SyntaxError( 4696 "text and blob field types not allowed in projection queries") 4697 else: 4698 projection.append(f.name) 4699 4700 # projection's can't include 'id'. 
4701 # it will be added to the result later 4702 query_projection = [ 4703 p for p in projection if \ 4704 p != db[tablename]._id.name] if projection \ 4705 else None 4706 4707 cursor = None 4708 if isinstance(args_get('reusecursor'), str): 4709 cursor = args_get('reusecursor') 4710 items = gae.Query(tableobj, projection=query_projection, 4711 cursor=cursor) 4712 4713 for filter in filters: 4714 if args_get('projection') == True and \ 4715 filter.name in query_projection and \ 4716 filter.op in ['=', '<=', '>=']: 4717 raise SyntaxError( 4718 "projection fields cannot have equality filters") 4719 if filter.name=='__key__' and filter.op=='>' and filter.value==0: 4720 continue 4721 elif filter.name=='__key__' and filter.op=='=': 4722 if filter.value==0: 4723 items = [] 4724 elif isinstance(filter.value, Key): 4725 # key qeuries return a class instance, 4726 # can't use projection 4727 # extra values will be ignored in post-processing later 4728 item = tableobj.get(filter.value) 4729 items = (item and [item]) or [] 4730 else: 4731 # key qeuries return a class instance, 4732 # can't use projection 4733 # extra values will be ignored in post-processing later 4734 item = tableobj.get_by_id(filter.value) 4735 items = (item and [item]) or [] 4736 elif isinstance(items,list): # i.e. there is a single record! 4737 items = [i for i in items if filter.apply( 4738 getattr(item,filter.name),filter.value)] 4739 else: 4740 if filter.name=='__key__' and filter.op != 'in': 4741 items.order('__key__') 4742 items = items.filter('%s %s' % (filter.name,filter.op), 4743 filter.value) 4744 if not isinstance(items,list): 4745 if args_get('left', None): 4746 raise SyntaxError('Set: no left join in appengine') 4747 if args_get('groupby', None): 4748 raise SyntaxError('Set: no groupby in appengine') 4749 orderby = args_get('orderby', False) 4750 if orderby: 4751 ### THIS REALLY NEEDS IMPROVEMENT !!! 
4752 if isinstance(orderby, (list, tuple)): 4753 orderby = xorify(orderby) 4754 if isinstance(orderby,Expression): 4755 orderby = self.expand(orderby) 4756 orders = orderby.split(', ') 4757 for order in orders: 4758 order={'-id':'-__key__','id':'__key__'}.get(order,order) 4759 items = items.order(order) 4760 if args_get('limitby', None): 4761 (lmin, lmax) = attributes['limitby'] 4762 (limit, offset) = (lmax - lmin, lmin) 4763 rows = items.fetch(limit,offset=offset) 4764 #cursor is only useful if there was a limit and we didn't return 4765 # all results 4766 if args_get('reusecursor'): 4767 db['_lastcursor'] = items.cursor() 4768 items = rows 4769 return (items, tablename, projection or db[tablename].fields)
4770
    def select(self,query,fields,attributes):
        """
        This is the GAE version of select.  some notes to consider:
         - db['_lastsql'] is not set because there is no SQL statement string
           for a GAE query
         - 'nativeRef' is a magical fieldname used for self references on GAE
         - optional attribute 'projection' when set to True will trigger
           use of the GAE projection queries.  note that there are rules for
           what is accepted imposed by GAE: each field must be indexed,
           projection queries cannot contain blob or text fields, and you
           cannot use == and also select that same field.  see https://developers.google.com/appengine/docs/python/datastore/queries#Query_Projection
         - optional attribute 'reusecursor' allows use of cursor with queries
           that have the limitby attribute.  Set the attribute to True for the
           first query, set it to the value of db['_lastcursor'] to continue
           a previous query.  The user must save the cursor value between
           requests, and the filters must be identical.  It is up to the user
           to follow google's limitations: https://developers.google.com/appengine/docs/python/datastore/queries#Query_Cursors
        """

        (items, tablename, fields) = self.select_raw(query,fields,attributes)
        # self.db['_lastsql'] = self._select(query,fields,attributes)
        # for the id column and self-references ('nativeRef') the row cell
        # is the model instance itself; every other column is read as a
        # plain attribute of the instance
        rows = [[(t==self.db[tablename]._id.name and item) or \
                 (t=='nativeRef' and item) or getattr(item, t) \
                     for t in fields] for item in items]
        colnames = ['%s.%s' % (tablename, t) for t in fields]
        processor = attributes.get('processor',self.parse)
        return processor(rows,fields,colnames,False)
4798
4799 - def count(self,query,distinct=None,limit=None):
4800 if distinct: 4801 raise RuntimeError("COUNT DISTINCT not supported") 4802 (items, tablename, fields) = self.select_raw(query) 4803 # self.db['_lastsql'] = self._count(query) 4804 try: 4805 return len(items) 4806 except TypeError: 4807 return items.count(limit=limit)
4808
    def delete(self,tablename, query):
        """
        This function was changed on 2010-05-04 because according to
        http://code.google.com/p/googleappengine/issues/detail?id=3119
        GAE no longer supports deleting more than 1000 records.
        """
        # self.db['_lastsql'] = self._delete(tablename,query)
        (items, tablename, fields) = self.select_raw(query)
        # items can be one item or a query
        if not isinstance(items,list):
            #use a keys_only query to ensure that this runs as a datastore
            # small operations
            # delete in batches of at most 1000 keys until none remain
            leftitems = items.fetch(1000, keys_only=True)
            counter = 0
            while len(leftitems):
                counter += len(leftitems)
                gae.delete(leftitems)
                leftitems = items.fetch(1000, keys_only=True)
        else:
            counter = len(items)
            gae.delete(items)
        # returns the number of deleted records
        return counter
4831
4832 - def update(self,tablename,query,update_fields):
4833 # self.db['_lastsql'] = self._update(tablename,query,update_fields) 4834 (items, tablename, fields) = self.select_raw(query) 4835 counter = 0 4836 for item in items: 4837 for field, value in update_fields: 4838 setattr(item, field.name, self.represent(value,field.type)) 4839 item.put() 4840 counter += 1 4841 LOGGER.info(str(counter)) 4842 return counter
4843
4844 - def insert(self,table,fields):
4845 dfields=dict((f.name,self.represent(v,f.type)) for f,v in fields) 4846 # table._db['_lastsql'] = self._insert(table,fields) 4847 tmp = table._tableobj(**dfields) 4848 tmp.put() 4849 rid = Reference(tmp.key().id()) 4850 (rid._table, rid._record, rid._gaekey) = (table, None, tmp.key()) 4851 return rid
4852
4853 - def bulk_insert(self,table,items):
4854 parsed_items = [] 4855 for item in items: 4856 dfields=dict((f.name,self.represent(v,f.type)) for f,v in item) 4857 parsed_items.append(table._tableobj(**dfields)) 4858 gae.put(parsed_items) 4859 return True
4860
def uuid2int(uuidv):
    """Return the 128-bit integer value of the UUID string `uuidv`."""
    parsed = uuid.UUID(uuidv)
    return parsed.int
4863
def int2uuid(n):
    """Return the canonical UUID string for the 128-bit integer `n`."""
    return str(uuid.UUID(int=n))
4866
class CouchDBAdapter(NoSQLAdapter):
    """Adapter for CouchDB.

    Queries are compiled into javascript map functions and executed as
    temporary CouchDB views; records are stored as plain documents with
    the web2py id kept in the document '_id' (as a string).
    """
    drivers = ('couchdb',)

    # uploads are stored inside the document rather than on disk
    uploads_in_blob = True
    # mapping of web2py field types onto python types
    types = {
                'boolean': bool,
                'string': str,
                'text': str,
                'json': str,
                'password': str,
                'blob': str,
                'upload': str,
                'integer': long,
                'bigint': long,
                'float': float,
                'double': float,
                'date': datetime.date,
                'time': datetime.time,
                'datetime': datetime.datetime,
                'id': long,
                'reference': long,
                'list:string': list,
                'list:integer': list,
                'list:reference': list,
        }

    # no filesystem migration files are used for CouchDB
    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj): pass

    def expand(self,expression,field_type=None):
        # the id field maps onto the CouchDB document key '_id'
        if isinstance(expression,Field):
            if expression.type=='id':
                return "%s._id" % expression.tablename
        return BaseAdapter.expand(self,expression,field_type)

    def AND(self,first,second):
        # javascript boolean and
        return '(%s && %s)' % (self.expand(first),self.expand(second))

    def OR(self,first,second):
        # javascript boolean or
        return '(%s || %s)' % (self.expand(first),self.expand(second))

    def EQ(self,first,second):
        if second is None:
            return '(%s == null)' % self.expand(first)
        return '(%s == %s)' % (self.expand(first),self.expand(second,first.type))

    def NE(self,first,second):
        if second is None:
            return '(%s != null)' % self.expand(first)
        return '(%s != %s)' % (self.expand(first),self.expand(second,first.type))

    def COMMA(self,first,second):
        # javascript string/array concatenation
        return '%s + %s' % (self.expand(first),self.expand(second))

    def represent(self, obj, fieldtype):
        # produce a javascript literal for embedding in a map function
        value = NoSQLAdapter.represent(self, obj, fieldtype)
        if fieldtype=='id':
            return repr(str(int(value)))
        elif fieldtype in ('date','time','datetime','boolean'):
            return serializers.json(value)
        # strings: utf8-encode unicode first, then repr-quote
        return repr(not isinstance(value,unicode) and value \
                        or value and value.encode('utf8'))

    def __init__(self,db,uri='couchdb://127.0.0.1:5984',
                 pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        # store connection parameters and open the server connection;
        # `credential_decoder` and `db_codec` are accepted for interface
        # compatibility but not used beyond the fixed UTF-8 codec
        self.db = db
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.dbengine = 'couchdb'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self._after_connection = after_connection
        self.pool_size = pool_size

        # strip the 'couchdb://' prefix and talk plain http
        url='http://'+uri[10:]
        def connector(url=url,driver_args=driver_args):
            return self.driver.Server(url,**driver_args)
        self.reconnect(connector,cursor=False)

    def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
        # best effort: creating an already-existing database raises,
        # which is deliberately swallowed here
        if migrate:
            try:
                self.connection.create(table._tablename)
            except:
                pass

    def insert(self,table,fields):
        # generate the record id client-side from a uuid
        id = uuid2int(web2py_uuid())
        ctable = self.connection[table._tablename]
        values = dict((k.name,self.represent(v,k.type)) for k,v in fields)
        values['_id'] = str(id)
        ctable.save(values)
        return id

    def _select(self,query,fields,attributes):
        # build the javascript map function and column names for `query`
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        for key in set(attributes.keys())-SELECT_ARGS:
            raise SyntaxError('invalid select attribute: %s' % key)
        new_fields=[]
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item._table
            else:
                new_fields.append(item)
        def uid(fd):
            # 'id' is stored as the document key '_id'
            return fd=='id' and '_id' or fd
        def get(row,fd):
            # NOTE(review): this helper is currently unused here
            return fd=='id' and int(row['_id']) or row.get(fd,None)
        fields = new_fields
        tablename = self.get_table(query)
        fieldnames = [f.name for f in (fields or self.db[tablename])]
        colnames = ['%s.%s' % (tablename,k) for k in fieldnames]
        fields = ','.join(['%s.%s' % (tablename,uid(f)) for f in fieldnames])
        # emit(doc._id, [selected fields]) for every matching document
        fn="(function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);})" %\
            dict(t=tablename,
                 query=self.expand(query),
                 order='%s._id' % tablename,
                 fields=fields)
        return fn, colnames

    def select(self,query,fields,attributes):
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        fn, colnames = self._select(query,fields,attributes)
        tablename = colnames[0].split('.')[0]
        ctable = self.connection[tablename]
        # run the map function as a temporary view
        rows = [cols['value'] for cols in ctable.query(fn)]
        processor = attributes.get('processor',self.parse)
        return processor(rows,fields,colnames,False)

    def delete(self,tablename,query):
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        if query.first.type=='id' and query.op==self.EQ:
            # fast path: delete a single document addressed by id
            id = query.second
            tablename = query.first.tablename
            assert(tablename == query.first.tablename)
            ctable = self.connection[tablename]
            try:
                del ctable[str(id)]
                return 1
            except couchdb.http.ResourceNotFound:
                return 0
        else:
            # general case: select matching ids, then delete one by one
            tablename = self.get_table(query)
            rows = self.select(query,[self.db[tablename]._id],{})
            ctable = self.connection[tablename]
            for row in rows:
                del ctable[str(row.id)]
            return len(rows)

    def update(self,tablename,query,fields):
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        if query.first.type=='id' and query.op==self.EQ:
            # fast path: update a single document addressed by id
            id = query.second
            tablename = query.first.tablename
            ctable = self.connection[tablename]
            try:
                doc = ctable[str(id)]
                for key,value in fields:
                    doc[key.name] = self.represent(value,self.db[tablename][key.name].type)
                ctable.save(doc)
                return 1
            except couchdb.http.ResourceNotFound:
                return 0
        else:
            # general case: select matching ids, then rewrite each document
            tablename = self.get_table(query)
            rows = self.select(query,[self.db[tablename]._id],{})
            ctable = self.connection[tablename]
            table = self.db[tablename]
            for row in rows:
                doc = ctable[str(row.id)]
                for key,value in fields:
                    doc[key.name] = self.represent(value,table[key.name].type)
                ctable.save(doc)
            return len(rows)

    def count(self,query,distinct=None):
        # counts by selecting the matching ids and measuring the result
        if distinct:
            raise RuntimeError("COUNT DISTINCT not supported")
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        tablename = self.get_table(query)
        rows = self.select(query,[self.db[tablename]._id],{})
        return len(rows)
5058
def cleanup(text):
    """Return `text` unchanged when it is a safe identifier.

    Only characters in [0-9a-zA-Z_] are accepted; anything else raises
    SyntaxError.
    """
    if REGEX_ALPHANUMERIC.match(text):
        return text
    raise SyntaxError('invalid table or field name: %s' % text)
5066
class MongoDBAdapter(NoSQLAdapter):
    """Adapter for MongoDB via pymongo.

    Record ids are MongoDB ObjectIds, exposed to web2py as integers.
    """
    # the driver parses/produces json-compatible documents natively
    native_json = True
    drivers = ('pymongo',)

    # uploads are stored inside the document rather than on disk
    uploads_in_blob = True

    # mapping of web2py field types onto python types
    types = {
                'boolean': bool,
                'string': str,
                'text': str,
                'json': str,
                'password': str,
                'blob': str,
                'upload': str,
                'integer': long,
                'bigint': long,
                'float': float,
                'double': float,
                'date': datetime.date,
                'time': datetime.time,
                'datetime': datetime.datetime,
                'id': long,
                'reference': long,
                'list:string': list,
                'list:integer': list,
                'list:reference': list,
        }

    error_messages = {"javascript_needed": "This must yet be replaced" +
                      " with javascript in order to work."}
    def __init__(self,db,uri='mongodb://127.0.0.1:5984/db',
                 pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the mongodb:// uri and open the database connection.

        Recognised adapter_args: 'minimumreplication' (write concern
        floor, default 0) and 'safe' (acknowledged writes, default True).
        """
        self.db = db
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        import random
        from bson.objectid import ObjectId
        from bson.son import SON
        import pymongo.uri_parser

        m = pymongo.uri_parser.parse_uri(uri)

        # keep driver helpers on the instance so other methods can use them
        self.SON = SON
        self.ObjectId = ObjectId
        self.random = random

        self.dbengine = 'mongodb'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self._after_connection = after_connection
        self.pool_size = pool_size
        # this is the minimum number of replicas that a write should
        # wait for on insert/update
        self.minimumreplication = adapter_args.get('minimumreplication',0)
        # by default all writes are acknowledged ("safe"), unless
        # overruled by this adapter argument or a per-call parameter
        self.safe = adapter_args.get('safe',True)

        if isinstance(m,tuple):
            m = {"database" : m[1]}
        if m.get('database')==None:
            raise SyntaxError("Database is required!")
        def connector(uri=self.uri,m=m):
            try:
                # Connection() is deprecated in newer pymongo;
                # prefer MongoClient when available
                if hasattr(self.driver, "MongoClient"):
                    Connection = self.driver.MongoClient
                else:
                    Connection = self.driver.Connection
                return Connection(uri)[m.get('database')]
            except self.driver.errors.ConnectionFailure:
                inst = sys.exc_info()[1]
                raise SyntaxError("The connection to " +
                                  uri + " could not be made")

        self.reconnect(connector,cursor=False)
5150
    def object_id(self, arg=None):
        """ Convert input to a valid Mongodb ObjectId instance

        self.object_id("<random>") -> ObjectId (not unique) instance

        Accepts an existing ObjectId (returned as-is), an integer, a
        decimal string, a hex string (with or without '0x'), or the
        special string "<random>" which produces a random 24-hex-digit
        id.  Falsy input maps to the all-zero ObjectId.
        """
        if not arg:
            arg = 0
        if isinstance(arg, basestring):
            # we assume an integer as default input
            # 24 hex characters without the 0x/L decorations is already
            # a raw ObjectId hex string
            rawhex = len(arg.replace("0x", "").replace("L", "")) == 24
            if arg.isdigit() and (not rawhex):
                arg = int(arg)
            elif arg == "<random>":
                # build a random 24-digit hex value
                arg = int("0x%sL" % \
                          "".join([self.random.choice("0123456789abcdef") \
                                   for x in range(24)]), 0)
            elif arg.isalnum():
                if not arg.startswith("0x"):
                    arg = "0x%s" % arg
                try:
                    arg = int(arg, 0)
                except ValueError, e:
                    raise ValueError(
                        "invalid objectid argument string: %s" % e)
            else:
                raise ValueError("Invalid objectid argument string. " +
                                 "Requires an integer or base 16 value")
        elif isinstance(arg, self.ObjectId):
            return arg
        if not isinstance(arg, (int, long)):
            raise TypeError("object_id argument must be of type " +
                            "ObjectId or an objectid representable integer")
        if arg == 0:
            hexvalue = "".zfill(24)
        else:
            # strip the '0x' prefix and any long-literal 'L' suffix
            hexvalue = hex(arg)[2:].replace("L", "")
        return self.ObjectId(hexvalue)
5187
5188 - def represent(self, obj, fieldtype):
5189 value = NoSQLAdapter.represent(self, obj, fieldtype) 5190 if fieldtype =='date': 5191 if value == None: 5192 return value 5193 # this piece of data can be stripped off based on the fieldtype 5194 t = datetime.time(0, 0, 0) 5195 # mongodb doesn't has a date object and so it must datetime, 5196 # string or integer 5197 return datetime.datetime.combine(value, t) 5198 elif fieldtype == 'time': 5199 if value == None: 5200 return value 5201 # this piece of data can be stripped of based on the fieldtype 5202 d = datetime.date(2000, 1, 1) 5203 # mongodb doesn't has a time object and so it must datetime, 5204 # string or integer 5205 return datetime.datetime.combine(d, value) 5206 elif fieldtype == 'list:string' or \ 5207 fieldtype == 'list:integer' or \ 5208 fieldtype == 'list:reference': 5209 return value 5210 return value
5211 5212 # Safe determines whether a asynchronious request is done or a 5213 # synchronious action is done 5214 # For safety, we use by default synchronious requests
5215 - def insert(self, table, fields, safe=None):
5216 if safe==None: 5217 safe = self.safe 5218 ctable = self.connection[table._tablename] 5219 values = dict() 5220 for k, v in fields: 5221 if not k.name in ["id", "safe"]: 5222 fieldname = k.name 5223 fieldtype = table[k.name].type 5224 if ("reference" in fieldtype) or (fieldtype=="id"): 5225 values[fieldname] = self.object_id(v) 5226 else: 5227 values[fieldname] = self.represent(v, fieldtype) 5228 ctable.insert(values, safe=safe) 5229 return int(str(values['_id']), 16)
5230
    def create_table(self, table, migrate=True, fake_migrate=False,
                     polymodel=None, isCapped=False):
        # mongodb creates collections lazily on first insert, so there
        # is nothing to do here; capped collections are not implemented
        if isCapped:
            raise RuntimeError("Not implemented")
5235
5236 - def count(self, query, distinct=None, snapshot=True):
5237 if distinct: 5238 raise RuntimeError("COUNT DISTINCT not supported") 5239 if not isinstance(query,Query): 5240 raise SyntaxError("Not Supported") 5241 tablename = self.get_table(query) 5242 return int(self.select(query,[self.db[tablename]._id], {}, 5243 count=True,snapshot=snapshot)['count'])
5244 # Maybe it would be faster if we just implemented the pymongo 5245 # .count() function which is probably quicker? 5246 # therefor call __select() connection[table].find(query).count() 5247 # Since this will probably reduce the return set? 5248
    def expand(self, expression, field_type=None):
        """Translate a DAL expression into its mongodb representation.

        NOTE(review): id/reference queries are MUTATED in place here —
        the field is renamed to '_id' and the value cast to ObjectId —
        so expanding the same Query twice relies on object_id/rename
        being idempotent.
        """
        if isinstance(expression, Query):
            # any query using 'id':=
            #    set name as _id (as per pymongo/mongodb primary key)
            #    convert second arg to an objectid field
            #    (if its not already)
            #    if second arg is 0 convert to objectid
            if isinstance(expression.first,Field) and \
                    ((expression.first.type == 'id') or \
                    ("reference" in expression.first.type)):
                if expression.first.type == 'id':
                    expression.first.name = '_id'
                # cast to Mongo ObjectId
                expression.second = self.object_id(expression.second)
                result = expression.op(expression.first, expression.second)
        if isinstance(expression, Field):
            if expression.type=='id':
                result = "_id"
            else:
                result = expression.name

        elif isinstance(expression, (Expression, Query)):
            # dispatch on arity: binary, unary, callable or literal op
            if not expression.second is None:
                result = expression.op(expression.first, expression.second)
            elif not expression.first is None:
                result = expression.op(expression.first)
            elif not isinstance(expression.op, str):
                result = expression.op()
            else:
                result = expression.op
        elif field_type:
            result = str(self.represent(expression,field_type))
        elif isinstance(expression,(list,tuple)):
            result = ','.join(self.represent(item,field_type) for
                              item in expression)
        else:
            result = expression
        return result
5287
    def _select(self, query, fields, attributes):
        """Compile a DAL select into pymongo find() arguments.

        Returns (tablename, query_dict, fields_dict, sort_list,
        limit, skip).  Only 'limitby', 'orderby' and 'for_update'
        attributes are recognised; others produce a warning.
        """
        if 'for_update' in attributes:
            logging.warn('mongodb does not support for_update')
        for key in set(attributes.keys())-set(('limitby',
                                               'orderby','for_update')):
            if attributes[key]!=None:
                logging.warn('select attribute not implemented: %s' % key)

        new_fields=[]
        mongosort_list = []

        # try an orderby attribute
        orderby = attributes.get('orderby', False)
        limitby = attributes.get('limitby', False)
        # distinct = attributes.get('distinct', False)
        if orderby:
            if isinstance(orderby, (list, tuple)):
                orderby = xorify(orderby)

            # !!!! need to add 'random'
            # a leading '-' means descending order in mongodb terms
            for f in self.expand(orderby).split(','):
                if f.startswith('-'):
                    mongosort_list.append((f[1:], -1))
                else:
                    mongosort_list.append((f, 1))

        if limitby:
            limitby_skip, limitby_limit = limitby
        else:
            # 0 means "no limit"/"no skip" for pymongo
            limitby_skip = limitby_limit = 0

        mongofields_dict = self.SON()
        mongoqry_dict = {}
        # expand SQLALL into the table's individual fields
        for item in fields:
            if isinstance(item, SQLALL):
                new_fields += item._table
            else:
                new_fields.append(item)
        fields = new_fields
        if isinstance(query,Query):
            tablename = self.get_table(query)
        elif len(fields) != 0:
            tablename = fields[0].tablename
        else:
            raise SyntaxError("The table name could not be found in " +
                              "the query nor from the select statement.")

        mongoqry_dict = self.expand(query)
        fields = fields or self.db[tablename]
        for field in fields:
            mongofields_dict[field.name] = 1

        return tablename, mongoqry_dict, mongofields_dict, mongosort_list, \
            limitby_limit, limitby_skip
5342 5343
    def select(self, query, fields, attributes, count=False,
               snapshot=False):
        """Execute a select; with count=True return {'count': n} instead.

        NOTE(review): Field objects whose column is '_id' are MUTATED
        (renamed to 'id') so the rows follow the standard DAL naming.
        """
        # TODO: support joins
        tablename, mongoqry_dict, mongofields_dict, mongosort_list, \
        limitby_limit, limitby_skip = self._select(query, fields, attributes)
        ctable = self.connection[tablename]

        if count:
            return {'count' : ctable.find(
                    mongoqry_dict, mongofields_dict,
                    skip=limitby_skip, limit=limitby_limit,
                    sort=mongosort_list, snapshot=snapshot).count()}
        else:
            # pymongo cursor object
            mongo_list_dicts = ctable.find(mongoqry_dict,
                                mongofields_dict, skip=limitby_skip,
                                limit=limitby_limit, sort=mongosort_list,
                                snapshot=snapshot)
            rows = []
            # populate row in proper order
            # Here we replace ._id with .id to follow the standard naming
            colnames = []
            newnames = []
            for field in fields:
                colname = str(field)
                colnames.append(colname)
                tablename, fieldname = colname.split(".")
                if fieldname == "_id":
                    # Mongodb reserved uuid key
                    field.name = "id"
                newnames.append(".".join((tablename, field.name)))

            for record in mongo_list_dicts:
                row=[]
                for colname in colnames:
                    tablename, fieldname = colname.split(".")
                    # switch to Mongo _id uuids for retrieving
                    # record id's
                    if fieldname == "id": fieldname = "_id"
                    if fieldname in record:
                        if isinstance(record[fieldname],
                                      self.ObjectId):
                            # ObjectIds are exposed as plain integers
                            value = int(str(record[fieldname]), 16)
                        else:
                            value = record[fieldname]
                    else:
                        # missing document keys read as NULL
                        value = None
                    row.append(value)
                rows.append(row)
            processor = attributes.get('processor', self.parse)
            result = processor(rows, fields, newnames, False)
            return result
5396 5397
5398 - def INVERT(self, first):
5399 #print "in invert first=%s" % first 5400 return '-%s' % self.expand(first)
5401
5402 - def drop(self, table, mode=''):
5403 ctable = self.connection[table._tablename] 5404 ctable.drop()
5405 5406
5407 - def truncate(self, table, mode, safe=None):
5408 if safe == None: 5409 safe=self.safe 5410 ctable = self.connection[table._tablename] 5411 ctable.remove(None, safe=True)
5412
5413 - def oupdate(self, tablename, query, fields):
5414 if not isinstance(query, Query): 5415 raise SyntaxError("Not Supported") 5416 filter = None 5417 if query: 5418 filter = self.expand(query) 5419 modify = {'$set': dict((k.name, self.represent(v, k.type)) for 5420 k, v in fields)} 5421 return modify, filter
5422
    def update(self, tablename, query, fields, safe=None):
        """Update all documents matching `query` with `fields`.

        Returns the number of adjusted rows (the driver-reported count
        when available, otherwise a pre-update count) and never lets a
        driver exception escape un-wrapped.
        """
        if safe == None:
            safe = self.safe
        # return amount of adjusted rows or zero, but no exceptions
        # @ related not finding the result
        if not isinstance(query, Query):
            raise RuntimeError("Not implemented")
        # count matches up-front as a fallback result value
        amount = self.count(query, False)
        modify, filter = self.oupdate(tablename, query, fields)
        try:
            result = self.connection[tablename].update(filter,
                       modify, multi=True, safe=safe)
            if safe:
                try:
                    # if result count is available fetch it
                    return result["n"]
                except (KeyError, AttributeError, TypeError):
                    return amount
            else:
                return amount
        except Exception, e:
            # TODO Reverse update query to verify that the query succeeded
            raise RuntimeError("uncaught exception when updating rows: %s" % e)
5446 5447 #this function returns a dict with the where clause and update fields
5448 - def _update(self,tablename,query,fields):
5449 return str(self.oupdate(tablename, query, fields))
5450
5451 - def delete(self, tablename, query, safe=None):
5452 if safe is None: 5453 safe = self.safe 5454 amount = 0 5455 amount = self.count(query, False) 5456 if not isinstance(query, Query): 5457 raise RuntimeError("query type %s is not supported" % \ 5458 type(query)) 5459 filter = self.expand(query) 5460 self._delete(tablename, filter, safe=safe) 5461 return amount
5462
5463 - def _delete(self, tablename, filter, safe=None):
5464 return self.connection[tablename].remove(filter, safe=safe)
5465
5466 - def bulk_insert(self, table, items):
5467 return [self.insert(table,item) for item in items]
5468 5469 # TODO This will probably not work:(
5470 - def NOT(self, first):
5471 result = {} 5472 result["$not"] = self.expand(first) 5473 return result
5474
5475 - def AND(self,first,second):
5476 f = self.expand(first) 5477 s = self.expand(second) 5478 f.update(s) 5479 return f
5480
5481 - def OR(self,first,second):
5482 # pymongo expects: .find({'$or': [{'name':'1'}, {'name':'2'}]}) 5483 result = {} 5484 f = self.expand(first) 5485 s = self.expand(second) 5486 result['$or'] = [f,s] 5487 return result
5488
    def BELONGS(self, first, second):
        """Translate ``field.belongs(values)`` into a MongoDB ``$in`` query.

        ``second`` may be a raw string, an empty sequence (matches
        nothing), or an iterable of values expanded with the field's
        type.
        """
        if isinstance(second, str):
            # NOTE(review): drops the last character of the string —
            # presumably strips a trailing delimiter from a serialized
            # value list; TODO confirm against callers.
            return {self.expand(first) : {"$in" : [ second[:-1]]} }
        elif second==[] or second==():
            # empty membership set: a filter document that never matches
            return {1:0}
        items = [self.expand(item, first.type) for item in second]
        return {self.expand(first) : {"$in" : items} }
5496
5497 - def EQ(self,first,second):
5498 result = {} 5499 result[self.expand(first)] = self.expand(second) 5500 return result
5501
5502 - def NE(self, first, second=None):
5503 result = {} 5504 result[self.expand(first)] = {'$ne': self.expand(second)} 5505 return result
5506
5507 - def LT(self,first,second=None):
5508 if second is None: 5509 raise RuntimeError("Cannot compare %s < None" % first) 5510 result = {} 5511 result[self.expand(first)] = {'$lt': self.expand(second)} 5512 return result
5513
5514 - def LE(self,first,second=None):
5515 if second is None: 5516 raise RuntimeError("Cannot compare %s <= None" % first) 5517 result = {} 5518 result[self.expand(first)] = {'$lte': self.expand(second)} 5519 return result
5520
5521 - def GT(self,first,second):
5522 result = {} 5523 result[self.expand(first)] = {'$gt': self.expand(second)} 5524 return result
5525
5526 - def GE(self,first,second=None):
5527 if second is None: 5528 raise RuntimeError("Cannot compare %s >= None" % first) 5529 result = {} 5530 result[self.expand(first)] = {'$gte': self.expand(second)} 5531 return result
5532
5533 - def ADD(self, first, second):
5534 raise NotImplementedError(self.error_messages["javascript_needed"]) 5535 return '%s + %s' % (self.expand(first), 5536 self.expand(second, first.type))
5537
5538 - def SUB(self, first, second):
5539 raise NotImplementedError(self.error_messages["javascript_needed"]) 5540 return '(%s - %s)' % (self.expand(first), 5541 self.expand(second, first.type))
5542
5543 - def MUL(self, first, second):
5544 raise NotImplementedError(self.error_messages["javascript_needed"]) 5545 return '(%s * %s)' % (self.expand(first), 5546 self.expand(second, first.type))
5547
5548 - def DIV(self, first, second):
5549 raise NotImplementedError(self.error_messages["javascript_needed"]) 5550 return '(%s / %s)' % (self.expand(first), 5551 self.expand(second, first.type))
5552
5553 - def MOD(self, first, second):
5554 raise NotImplementedError(self.error_messages["javascript_needed"]) 5555 return '(%s %% %s)' % (self.expand(first), 5556 self.expand(second, first.type))
5557
5558 - def AS(self, first, second):
5559 raise NotImplementedError(self.error_messages["javascript_needed"]) 5560 return '%s AS %s' % (self.expand(first), second)
5561 5562 # We could implement an option that simulates a full featured SQL 5563 # database. But I think the option should be set explicit or 5564 # implemented as another library.
5565 - def ON(self, first, second):
5566 raise NotImplementedError("This is not possible in NoSQL" + 5567 " but can be simulated with a wrapper.") 5568 return '%s ON %s' % (self.expand(first), self.expand(second))
5569 5570 # BLOW ARE TWO IMPLEMENTATIONS OF THE SAME FUNCITONS 5571 # WHICH ONE IS BEST? 5572
5573 - def COMMA(self, first, second):
5574 return '%s, %s' % (self.expand(first), self.expand(second))
5575
5576 - def LIKE(self, first, second):
5577 #escaping regex operators? 5578 return {self.expand(first): ('%s' % \ 5579 self.expand(second, 'string').replace('%','/'))}
5580
5581 - def STARTSWITH(self, first, second):
5582 #escaping regex operators? 5583 return {self.expand(first): ('/^%s/' % \ 5584 self.expand(second, 'string'))}
5585
5586 - def ENDSWITH(self, first, second):
5587 #escaping regex operators? 5588 return {self.expand(first): ('/%s^/' % \ 5589 self.expand(second, 'string'))}
5590
5591 - def CONTAINS(self, first, second, case_sensitive=False):
5592 # silently ignore, only case sensitive 5593 # There is a technical difference, but mongodb doesn't support 5594 # that, but the result will be the same 5595 return {self.expand(first) : ('/%s/' % \ 5596 self.expand(second, 'string'))}
5597
5598 - def LIKE(self, first, second):
5599 import re 5600 return {self.expand(first): {'$regex': \ 5601 re.escape(self.expand(second, 5602 'string')).replace('%','.*')}}
5603 5604 #TODO verify full compatibilty with official SQL Like operator
5605 - def STARTSWITH(self, first, second):
5606 #TODO Solve almost the same problem as with endswith 5607 import re 5608 return {self.expand(first): {'$regex' : '^' + 5609 re.escape(self.expand(second, 5610 'string'))}}
5611 5612 #TODO verify full compatibilty with official SQL Like operator
5613 - def ENDSWITH(self, first, second):
5614 #escaping regex operators? 5615 #TODO if searched for a name like zsa_corbitt and the function 5616 # is endswith('a') then this is also returned. 5617 # Aldo it end with a t 5618 import re 5619 return {self.expand(first): {'$regex': \ 5620 re.escape(self.expand(second, 'string')) + '$'}}
5621 5622 #TODO verify full compatibilty with official oracle contains operator
5623 - def CONTAINS(self, first, second, case_sensitive=False):
5624 # silently ignore, only case sensitive 5625 #There is a technical difference, but mongodb doesn't support 5626 # that, but the result will be the same 5627 #TODO contains operators need to be transformed to Regex 5628 return {self.expand(first) : {' $regex': \ 5629 ".*" + re.escape(self.expand(second, 'string')) + ".*"}}
5630
class IMAPAdapter(NoSQLAdapter):
    """IMAP server adapter.

    Lets web2py DAL query syntax drive read/search/flag operations
    against IMAP mail servers (e.g. Gmail(r), Yahoo!(r)).  Uses Python
    imaplib/email per RFC2060 and RFC3501, following examples by Yuji
    Tomita (http://yuji.wordpress.com/2011/06/22/python-imaplib-imap-example-with-gmail/#comment-1137).
    Tested mainly against Gmail(r); other services' requests could
    raise command syntax and response data issues.

    Tables and fields are created "statically": leave the definitions
    to the DAL by calling the adapter's .define_tables() method, which
    builds tables from the server mailbox list and returns a dict
    mapping DAL tablenames to server mailbox names:

        {<tablename>: str <server mailbox name>}

    Supported fields:

        Field        Type          Description
        ------------------------------------------------------------
        uid          string
        answered     boolean       Flag
        created      date
        content      list:string   A list of text or html parts
        to           string
        cc           string
        bcc          string
        size         integer       the amount of octets of the message*
        deleted      boolean       Flag
        draft        boolean       Flag
        flagged      boolean       Flag
        sender       string
        recent       boolean       Flag
        seen         boolean       Flag
        subject      string
        mime         string        The mime header declaration
        email        string        The complete RFC822 message**
        attachments  <type list>   Each non text part as dict
        encoding     string        The main detected encoding

    * At the application side it is measured as the length of the
      RFC822 message string.

    WARNING: row ids are mapped to email sequence numbers, which change
    whenever the mailbox is updated.  Make sure your imap client web2py
    app does not delete messages during select or update actions, to
    prevent updating or deleting different messages.  To avoid sequence
    number issues, use uid fields in query references (the
    update-and-delete-in-separate-actions rule still applies).

    Recommended model code::

        imapdb = DAL("imap://user:password@server:port", pool_size=1) # port 993 for ssl
        imapdb.define_tables()

    Example commands::

        # Count today's unseen messages smaller than 6000 octets
        # from the inbox mailbox
        q = imapdb.INBOX.seen == False
        q &= imapdb.INBOX.created == datetime.date.today()
        q &= imapdb.INBOX.size < 6000
        unread = imapdb(q).count()

        # Fetch last query messages
        rows = imapdb(q).select()

        # Filtering with limitby and sequences of mailbox fields
        set.select(<fields sequence>, limitby=(<int>, <int>))

        # Mark last query messages as seen
        messages = [row.uid for row in rows]
        seen = imapdb(imapdb.INBOX.uid.belongs(messages)).update(seen=True)

        # Delete messages with mails from mr. Gumby
        deleted = 0
        for mailbox in imapdb.tables:
            deleted += imapdb(imapdb[mailbox].sender.contains("gumby")).delete()
        # or mark them for deletion with set.update(deleted=True)

        # tablename -> server native mailbox name pairs
        imapdb.mailboxes
        # a single table's native mailbox name
        imapdb.<table>.mailbox

    New features v2.4.1::

        # Declare mailboxes statically with tablename, name pairs
        # (avoids the extra server names retrieval)
        imapdb.define_tables({"inbox": "INBOX"})

        # Selects without content/attachments/email columns will only
        # fetch header and flags
        imapdb(q).select(imapdb.INBOX.sender, imapdb.INBOX.subject)
    """

    drivers = ('imaplib',)

    # DAL field type -> Python type used when building rows from messages
    types = {
                'string': str,
                'text': str,
                'date': datetime.date,
                'datetime': datetime.datetime,
                'id': long,
                'boolean': bool,
                'integer': int,
                'bigint': long,
                'blob': str,
                'list:string': str,
        }

    dbengine = 'imap'

    # parses "user[:password]@host[:port]" out of the connection URI
    REGEX_URI = re.compile('^(?P<user>[^:]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?$')
    def __init__(self,
                 db,
                 uri,
                 pool_size=0,
                 folder=None,
                 db_codec ='UTF-8',
                 credential_decoder=IDENTITY,
                 driver_args={},
                 adapter_args={},
                 do_connect=True,
                 after_connection=None):
        """Set up the IMAP adapter and (optionally) connect.

        The uri has the form user@example.com:password@imap.server.com:123;
        port 993 switches to SSL.  A ``connector`` closure is stored for
        the connection pool instead of connecting eagerly.

        TODO: max size adapter argument for preventing large mail transfers
        """
        self.db = db
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.pool_size=pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.credential_decoder = credential_decoder
        self.driver_args = driver_args
        self.adapter_args = adapter_args
        self.mailbox_size = None
        self.static_names = None
        self.charset = sys.getfilesystemencoding()
        # imap driver class (IMAP4 or IMAP4_SSL), chosen at connect time
        self.imap4 = None
        uri = uri.split("://")[1]

        # MESSAGE is the identifier for a message sequence number
        self.flags = ['\\Deleted', '\\Draft', '\\Flagged',
                      '\\Recent', '\\Seen', '\\Answered']
        # DAL field name -> IMAP SEARCH keyword (None: not searchable)
        self.search_fields = {
            'id': 'MESSAGE', 'created': 'DATE',
            'uid': 'UID', 'sender': 'FROM',
            'to': 'TO', 'cc': 'CC',
            'bcc': 'BCC', 'content': 'TEXT',
            'size': 'SIZE', 'deleted': '\\Deleted',
            'draft': '\\Draft', 'flagged': '\\Flagged',
            'recent': '\\Recent', 'seen': '\\Seen',
            'subject': 'SUBJECT', 'answered': '\\Answered',
            'mime': None, 'email': None,
            'attachments': None
            }

        db['_lastsql'] = ''

        m = self.REGEX_URI.match(uri)
        user = m.group('user')
        password = m.group('password')
        host = m.group('host')
        port = int(m.group('port'))
        over_ssl = False
        if port==993:
            over_ssl = True

        driver_args.update(host=host,port=port, password=password, user=user)
        def connector(driver_args=driver_args):
            # NOTE: successful authentication is always assumed here
            # TODO: support direct connection and login tests
            if over_ssl:
                self.imap4 = self.driver.IMAP4_SSL
            else:
                self.imap4 = self.driver.IMAP4
            connection = self.imap4(driver_args["host"], driver_args["port"])
            data = connection.login(driver_args["user"], driver_args["password"])

            # static mailbox list (filled lazily by get_mailboxes)
            connection.mailbox_names = None

            # dummy cursor function: imaplib has no cursor concept, but
            # the connection pool expects one
            connection.cursor = lambda : True

            return connection

        self.db.define_tables = self.define_tables
        self.connector = connector
        if do_connect: self.reconnect()
5860
    def reconnect(self, f=None, cursor=True):
        """
        IMAP4 pool connection method.

        An imap connection lacks a cursor command, so a custom command
        is provided as a replacement for connection pooling to prevent
        uncaught remote session closing.  Reuses a pooled connection
        when one is available and still alive; otherwise creates a new
        one via the stored connector ``f``.
        """
        # already connected: nothing to do
        if getattr(self,'connection',None) != None:
            return
        if f is None:
            f = self.connector

        if not self.pool_size:
            self.connection = f()
            self.cursor = cursor and self.connection.cursor()
        else:
            POOLS = ConnectionPool.POOLS
            uri = self.uri
            while True:
                GLOBAL_LOCKER.acquire()
                if not uri in POOLS:
                    POOLS[uri] = []
                if POOLS[uri]:
                    # reuse a pooled connection
                    self.connection = POOLS[uri].pop()
                    GLOBAL_LOCKER.release()
                    self.cursor = cursor and self.connection.cursor()
                    if self.cursor and self.check_active_connection:
                        try:
                            # check if connection is alive or close it
                            result, data = self.connection.list()
                        except:
                            # Possible connection reset error
                            # TODO: read exception class
                            self.connection = f()
                    break
                else:
                    # pool empty: create a fresh connection
                    GLOBAL_LOCKER.release()
                    self.connection = f()
                    self.cursor = cursor and self.connection.cursor()
                    break
        self.after_connection_hook()
5905
5906 - def get_last_message(self, tablename):
5907 last_message = None 5908 # request mailbox list to the server 5909 # if needed 5910 if not isinstance(self.connection.mailbox_names, dict): 5911 self.get_mailboxes() 5912 try: 5913 result = self.connection.select(self.connection.mailbox_names[tablename]) 5914 last_message = int(result[1][0]) 5915 except (IndexError, ValueError, TypeError, KeyError): 5916 e = sys.exc_info()[1] 5917 LOGGER.debug("Error retrieving the last mailbox sequence number. %s" % str(e)) 5918 return last_message
5919
5920 - def get_uid_bounds(self, tablename):
5921 if not isinstance(self.connection.mailbox_names, dict): 5922 self.get_mailboxes() 5923 # fetch first and last messages 5924 # return (first, last) messages uid's 5925 last_message = self.get_last_message(tablename) 5926 result, data = self.connection.uid("search", None, "(ALL)") 5927 uid_list = data[0].strip().split() 5928 if len(uid_list) <= 0: 5929 return None 5930 else: 5931 return (uid_list[0], uid_list[-1])
5932
5933 - def convert_date(self, date, add=None):
5934 if add is None: 5935 add = datetime.timedelta() 5936 """ Convert a date object to a string 5937 with d-Mon-Y style for IMAP or the inverse 5938 case 5939 5940 add <timedelta> adds to the date object 5941 """ 5942 months = [None, "Jan","Feb","Mar","Apr","May","Jun", 5943 "Jul", "Aug","Sep","Oct","Nov","Dec"] 5944 if isinstance(date, basestring): 5945 # Prevent unexpected date response format 5946 try: 5947 dayname, datestring = date.split(",") 5948 except (ValueError): 5949 LOGGER.debug("Could not parse date text: %s" % date) 5950 return None 5951 date_list = datestring.strip().split() 5952 year = int(date_list[2]) 5953 month = months.index(date_list[1]) 5954 day = int(date_list[0]) 5955 hms = map(int, date_list[3].split(":")) 5956 return datetime.datetime(year, month, day, 5957 hms[0], hms[1], hms[2]) + add 5958 elif isinstance(date, (datetime.datetime, datetime.date)): 5959 return (date + add).strftime("%d-%b-%Y") 5960 5961 else: 5962 return None
5963 5964 @staticmethod
5965 - def header_represent(f, r):
5966 from email.header import decode_header 5967 text, encoding = decode_header(f)[0] 5968 return text
5969
5970 - def encode_text(self, text, charset, errors="replace"):
5971 """ convert text for mail to unicode""" 5972 if text is None: 5973 text = "" 5974 else: 5975 if isinstance(text, str): 5976 if charset is None: 5977 text = unicode(text, "utf-8", errors) 5978 else: 5979 text = unicode(text, charset, errors) 5980 else: 5981 raise Exception("Unsupported mail text type %s" % type(text)) 5982 return text.encode("utf-8")
5983
5984 - def get_charset(self, message):
5985 charset = message.get_content_charset() 5986 return charset
5987
5988 - def get_mailboxes(self):
5989 """ Query the mail database for mailbox names """ 5990 if self.static_names: 5991 # statically defined mailbox names 5992 self.connection.mailbox_names = self.static_names 5993 return self.static_names.keys() 5994 5995 mailboxes_list = self.connection.list() 5996 self.connection.mailbox_names = dict() 5997 mailboxes = list() 5998 x = 0 5999 for item in mailboxes_list[1]: 6000 x = x + 1 6001 item = item.strip() 6002 if not "NOSELECT" in item.upper(): 6003 sub_items = item.split("\"") 6004 sub_items = [sub_item for sub_item in sub_items \ 6005 if len(sub_item.strip()) > 0] 6006 # mailbox = sub_items[len(sub_items) -1] 6007 mailbox = sub_items[-1] 6008 # remove unwanted characters and store original names 6009 # Don't allow leading non alphabetic characters 6010 mailbox_name = re.sub('^[_0-9]*', '', re.sub('[^_\w]','',re.sub('[/ ]','_',mailbox))) 6011 mailboxes.append(mailbox_name) 6012 self.connection.mailbox_names[mailbox_name] = mailbox 6013 6014 return mailboxes
6015
6016 - def get_query_mailbox(self, query):
6017 nofield = True 6018 tablename = None 6019 attr = query 6020 while nofield: 6021 if hasattr(attr, "first"): 6022 attr = attr.first 6023 if isinstance(attr, Field): 6024 return attr.tablename 6025 elif isinstance(attr, Query): 6026 pass 6027 else: 6028 return None 6029 else: 6030 return None 6031 return tablename
6032
6033 - def is_flag(self, flag):
6034 if self.search_fields.get(flag, None) in self.flags: 6035 return True 6036 else: 6037 return False
6038
    def define_tables(self, mailbox_names=None):
        """
        Auto create common IMAP fields.

        This function creates field definitions "statically", meaning
        custom fields (as in other adapters) should not be supported;
        definitions are handled on a service/mode basis (local syntax
        for Gmail(r), Ymail(r)).

        mailbox_names: optional dict of statically declared
            {tablename: native mailbox name} pairs; when omitted the
            server mailbox list is fetched.

        Returns a dictionary with tablename, server native mailbox name
        pairs.
        """
        if mailbox_names:
            # optional statically declared mailboxes
            self.static_names = mailbox_names
        else:
            self.static_names = None
        if not isinstance(self.connection.mailbox_names, dict):
            self.get_mailboxes()

        names = self.connection.mailbox_names.keys()

        for name in names:
            self.db.define_table("%s" % name,
                            Field("uid", "string", writable=False),
                            Field("answered", "boolean"),
                            Field("created", "datetime", writable=False),
                            Field("content", "list:string", writable=False),
                            Field("to", "string", writable=False),
                            Field("cc", "string", writable=False),
                            Field("bcc", "string", writable=False),
                            Field("size", "integer", writable=False),
                            Field("deleted", "boolean"),
                            Field("draft", "boolean"),
                            Field("flagged", "boolean"),
                            Field("sender", "string", writable=False),
                            Field("recent", "boolean", writable=False),
                            Field("seen", "boolean"),
                            Field("subject", "string", writable=False),
                            Field("mime", "string", writable=False),
                            Field("email", "string", writable=False, readable=False),
                            Field("attachments", list, writable=False, readable=False),
                            Field("encoding")
                            )

            # Set a special _mailbox attribute for storing
            # native mailbox names
            self.db[name].mailbox = \
                self.connection.mailbox_names[name]

            # decode quoted printable in address/subject headers
            self.db[name].to.represent = self.db[name].cc.represent = \
                self.db[name].bcc.represent = self.db[name].sender.represent = \
                self.db[name].subject.represent = self.header_represent

        # Set the db instance mailbox collections
        self.db.mailboxes = self.connection.mailbox_names
        return self.db.mailboxes
6097
6098 - def create_table(self, *args, **kwargs):
6099 # not implemented 6100 # but required by DAL 6101 pass
6102
6103 - def _select(self, query, fields, attributes):
6104 if use_common_filters(query): 6105 query = self.common_filter(query, [self.get_query_mailbox(query),]) 6106 return str(query)
6107
    def select(self, query, fields, attributes):
        """Search and fetch records and return web2py rows.

        Runs an IMAP UID SEARCH for ``query``, fetches each matching
        message (headers-only unless content/size/attachments/email
        columns are requested), maps the results onto the static field
        set, and feeds the resulting arrays to the row processor.
        Supports the ``limitby`` and ``processor`` attributes.
        """
        # move this statement elsewhere (upper-level)
        if use_common_filters(query):
            query = self.common_filter(query, [self.get_query_mailbox(query),])

        import email
        # get records from imap server with search + fetch
        # convert results to a dictionary
        tablename = None
        fetch_results = list()

        if isinstance(query, Query):
            tablename = self.get_table(query)
            mailbox = self.connection.mailbox_names.get(tablename, None)
            if mailbox is None:
                raise ValueError("Mailbox name not found: %s" % mailbox)
            else:
                # select with readonly
                result, selected = self.connection.select(mailbox, True)
                if result != "OK":
                    raise Exception("IMAP error: %s" % selected)
                self.mailbox_size = int(selected[0])
                search_query = "(%s)" % str(query).strip()
                search_result = self.connection.uid("search", None, search_query)
                # Normal IMAP response OK is assumed (change this)
                if search_result[0] == "OK":
                    # For "light" remote server responses just get the first
                    # ten records (change for non-experimental implementation)
                    # However, light responses are not guaranteed with this
                    # approach, just fewer messages.
                    limitby = attributes.get('limitby', None)
                    messages_set = search_result[1][0].split()
                    # descending order
                    messages_set.reverse()
                    if limitby is not None:
                        # TODO: orderby, asc/desc, limitby from complete message set
                        messages_set = messages_set[int(limitby[0]):int(limitby[1])]

                    # keep the requests small for header/flags
                    if any([(field.name in ["content", "size",
                                            "attachments", "email"]) for
                           field in fields]):
                        imap_fields = "(RFC822 FLAGS)"
                    else:
                        imap_fields = "(RFC822.HEADER FLAGS)"

                    if len(messages_set) > 0:
                        # create fetch results object list
                        # fetch each remote message and store it in memmory
                        # (change to multi-fetch command syntax for faster
                        # transactions)
                        for uid in messages_set:
                            # fetch the RFC822 message body
                            typ, data = self.connection.uid("fetch", uid, imap_fields)
                            if typ == "OK":
                                fr = {"message": int(data[0][0].split()[0]),
                                      "uid": int(uid),
                                      "email": email.message_from_string(data[0][1]),
                                      "raw_message": data[0][1]}
                                fr["multipart"] = fr["email"].is_multipart()
                                # fetch flags for the message
                                fr["flags"] = self.driver.ParseFlags(data[1])
                                fetch_results.append(fr)
                            else:
                                # error retrieving the message body
                                raise Exception("IMAP error retrieving the body: %s" % data)
                else:
                    raise Exception("IMAP search error: %s" % search_result[1])
        elif isinstance(query, (Expression, basestring)):
            raise NotImplementedError()
        else:
            raise TypeError("Unexpected query type")

        imapqry_dict = {}
        imapfields_dict = {}

        # an empty or SQLALL field list means "all searchable fields"
        if len(fields) == 1 and isinstance(fields[0], SQLALL):
            allfields = True
        elif len(fields) == 0:
            allfields = True
        else:
            allfields = False
        if allfields:
            colnames = ["%s.%s" % (tablename, field) for field in self.search_fields.keys()]
        else:
            colnames = ["%s.%s" % (tablename, field.name) for field in fields]

        for k in colnames:
            imapfields_dict[k] = k

        imapqry_list = list()
        imapqry_array = list()
        for fr in fetch_results:
            attachments = []
            content = []
            size = 0
            n = int(fr["message"])
            item_dict = dict()
            message = fr["email"]
            uid = fr["uid"]
            charset = self.get_charset(message)
            flags = fr["flags"]
            raw_message = fr["raw_message"]
            # Return messages data mapping static fields
            # and fetched results. Mapping should be made
            # outside the select function (with auxiliary
            # instance methods)

            # pending: search flags states trough the email message
            # instances for correct output

            # preserve subject encoding (ASCII/quoted printable)

            if "%s.id" % tablename in colnames:
                item_dict["%s.id" % tablename] = n
            if "%s.created" % tablename in colnames:
                item_dict["%s.created" % tablename] = self.convert_date(message["Date"])
            if "%s.uid" % tablename in colnames:
                item_dict["%s.uid" % tablename] = uid
            if "%s.sender" % tablename in colnames:
                # If there is no encoding found in the message header
                # force utf-8 replacing characters (change this to
                # module's defaults). Applies to .sender, .to, .cc and .bcc fields
                item_dict["%s.sender" % tablename] = message["From"]
            if "%s.to" % tablename in colnames:
                item_dict["%s.to" % tablename] = message["To"]
            if "%s.cc" % tablename in colnames:
                if "Cc" in message.keys():
                    item_dict["%s.cc" % tablename] = message["Cc"]
                else:
                    item_dict["%s.cc" % tablename] = ""
            if "%s.bcc" % tablename in colnames:
                if "Bcc" in message.keys():
                    item_dict["%s.bcc" % tablename] = message["Bcc"]
                else:
                    item_dict["%s.bcc" % tablename] = ""
            if "%s.deleted" % tablename in colnames:
                item_dict["%s.deleted" % tablename] = "\\Deleted" in flags
            if "%s.draft" % tablename in colnames:
                item_dict["%s.draft" % tablename] = "\\Draft" in flags
            if "%s.flagged" % tablename in colnames:
                item_dict["%s.flagged" % tablename] = "\\Flagged" in flags
            if "%s.recent" % tablename in colnames:
                item_dict["%s.recent" % tablename] = "\\Recent" in flags
            if "%s.seen" % tablename in colnames:
                item_dict["%s.seen" % tablename] = "\\Seen" in flags
            if "%s.subject" % tablename in colnames:
                item_dict["%s.subject" % tablename] = message["Subject"]
            if "%s.answered" % tablename in colnames:
                item_dict["%s.answered" % tablename] = "\\Answered" in flags
            if "%s.mime" % tablename in colnames:
                item_dict["%s.mime" % tablename] = message.get_content_type()
            if "%s.encoding" % tablename in colnames:
                item_dict["%s.encoding" % tablename] = charset

            # Here goes the whole RFC822 body as an email instance
            # for controller side custom processing
            # The message is stored as a raw string
            # >> email.message_from_string(raw string)
            # returns a Message object for enhanced object processing
            if "%s.email" % tablename in colnames:
                # WARNING: no encoding performed (raw message)
                item_dict["%s.email" % tablename] = raw_message

            # Size measure as suggested in a Velocity Reviews post
            # by Tim Williams: "how to get size of email attachment"
            # Note: len() and server RFC822.SIZE reports doesn't match
            # To retrieve the server size for representation would add a new
            # fetch transaction to the process
            for part in message.walk():
                maintype = part.get_content_maintype()
                if ("%s.attachments" % tablename in colnames) or \
                   ("%s.content" % tablename in colnames):
                    if "%s.attachments" % tablename in colnames:
                        if not ("text" in maintype):
                            payload = part.get_payload(decode=True)
                            if payload:
                                attachment = {
                                    "payload": payload,
                                    "filename": part.get_filename(),
                                    "encoding": part.get_content_charset(),
                                    "mime": part.get_content_type(),
                                    "disposition": part["Content-Disposition"]}
                                attachments.append(attachment)
                    if "%s.content" % tablename in colnames:
                        payload = part.get_payload(decode=True)
                        part_charset = self.get_charset(part)
                        if "text" in maintype:
                            if payload:
                                content.append(self.encode_text(payload, part_charset))
                if "%s.size" % tablename in colnames:
                    if part is not None:
                        size += len(str(part))
            item_dict["%s.content" % tablename] = bar_encode(content)
            item_dict["%s.attachments" % tablename] = attachments
            item_dict["%s.size" % tablename] = size
            imapqry_list.append(item_dict)

        # extra object mapping for the sake of rows object
        # creation (sends an array or lists)
        for item_dict in imapqry_list:
            imapqry_array_item = list()
            for fieldname in colnames:
                imapqry_array_item.append(item_dict[fieldname])
            imapqry_array.append(imapqry_array_item)

        # parse result and return a rows object
        colnames = colnames
        processor = attributes.get('processor',self.parse)
        return processor(imapqry_array, fields, colnames)
6320
    def _update(self, tablename, query, fields, commit=False):
        """Build (but do not execute) the IMAP STORE commands for an
        update.

        Returns a list of ``(message_number, "+FLAGS"/"-FLAGS",
        "(flag ...)")`` tuples that update() executes.  Only flag
        fields are honored; \\Recent is read-only and skipped.
        """
        # TODO: the adapter should implement an .expand method
        commands = list()
        if use_common_filters(query):
            query = self.common_filter(query, [tablename,])
        mark = []
        unmark = []
        if query:
            # split requested flag values into set (+) and clear (-)
            for item in fields:
                field = item[0]
                name = field.name
                value = item[1]
                if self.is_flag(name):
                    flag = self.search_fields[name]
                    if (value is not None) and (flag != "\\Recent"):
                        if value:
                            mark.append(flag)
                        else:
                            unmark.append(flag)
            result, data = self.connection.select(
                self.connection.mailbox_names[tablename])
            string_query = "(%s)" % query
            result, data = self.connection.search(None, string_query)
            store_list = [item.strip() for item in data[0].split()
                          if item.strip().isdigit()]
            # build commands for marked flags
            for number in store_list:
                result = None
                if len(mark) > 0:
                    commands.append((number, "+FLAGS", "(%s)" % " ".join(mark)))
                if len(unmark) > 0:
                    commands.append((number, "-FLAGS", "(%s)" % " ".join(unmark)))
        return commands
6354
6355 - def update(self, tablename, query, fields):
6356 rowcount = 0 6357 commands = self._update(tablename, query, fields) 6358 for command in commands: 6359 result, data = self.connection.store(*command) 6360 if result == "OK": 6361 rowcount += 1 6362 else: 6363 raise Exception("IMAP storing error: %s" % data) 6364 return rowcount
6365
6366 - def _count(self, query, distinct=None):
6367 raise NotImplementedError()
6368
6369 - def count(self,query,distinct=None):
6370 counter = 0 6371 tablename = self.get_query_mailbox(query) 6372 if query and tablename is not None: 6373 if use_common_filters(query): 6374 query = self.common_filter(query, [tablename,]) 6375 result, data = self.connection.select(self.connection.mailbox_names[tablename]) 6376 string_query = "(%s)" % query 6377 result, data = self.connection.search(None, string_query) 6378 store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()] 6379 counter = len(store_list) 6380 return counter
6381
    def delete(self, tablename, query):
        """
        Flag matching messages as \\Deleted and expunge the mailbox.
        Returns the number of messages successfully flagged.
        Raises Exception on the first failing STORE command.
        """
        counter = 0
        if query:
            if use_common_filters(query):
                query = self.common_filter(query, [tablename,])
            result, data = self.connection.select(self.connection.mailbox_names[tablename])
            string_query = "(%s)" % query
            result, data = self.connection.search(None, string_query)
            # data[0] is a whitespace-separated list of message numbers
            store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()]
            for number in store_list:
                result, data = self.connection.store(number, "+FLAGS", "(\\Deleted)")
                if result == "OK":
                    counter += 1
                else:
                    raise Exception("IMAP store error: %s" % data)
            if counter > 0:
                # physically remove the flagged messages
                result, data = self.connection.expunge()
        return counter
6400
6401 - def BELONGS(self, first, second):
6402 result = None 6403 name = self.search_fields[first.name] 6404 if name == "MESSAGE": 6405 values = [str(val) for val in second if str(val).isdigit()] 6406 result = "%s" % ",".join(values).strip() 6407 6408 elif name == "UID": 6409 values = [str(val) for val in second if str(val).isdigit()] 6410 result = "UID %s" % ",".join(values).strip() 6411 6412 else: 6413 raise Exception("Operation not supported") 6414 # result = "(%s %s)" % (self.expand(first), self.expand(second)) 6415 return result
6416
    def CONTAINS(self, first, second, case_sensitive=False):
        # silently ignore, only case sensitive
        # NOTE(review): the case_sensitive argument is accepted for API
        # compatibility but never used here -- confirm callers do not
        # rely on case-sensitive matching.
        result = None
        name = self.search_fields[first.name]

        if name in ("FROM", "TO", "SUBJECT", "TEXT"):
            # direct IMAP SEARCH keywords
            result = "%s \"%s\"" % (name, self.expand(second))
        else:
            if first.name in ("cc", "bcc"):
                # cc/bcc are searched by their header name
                result = "%s \"%s\"" % (first.name.upper(), self.expand(second))
            elif first.name == "mime":
                # mime field maps onto the Content-Type header
                result = "HEADER Content-Type \"%s\"" % self.expand(second)
            else:
                raise Exception("Operation not supported")
        return result
6432
    def GT(self, first, second):
        # Translate "field > value" into an IMAP SEARCH criterion.
        result = None
        name = self.search_fields[first.name]
        if name == "MESSAGE":
            # sequence numbers: everything strictly after `second`
            last_message = self.get_last_message(first.tablename)
            result = "%d:%d" % (int(self.expand(second)) + 1, last_message)
        elif name == "UID":
            # GT and LT may not return
            # expected sets depending on
            # the uid format implemented
            try:
                pedestal, threshold = self.get_uid_bounds(first.tablename)
            except TypeError:
                e = sys.exc_info()[1]
                LOGGER.debug("Error requesting uid bounds: %s", str(e))
                return ""
            try:
                lower_limit = int(self.expand(second)) + 1
            except (ValueError, TypeError):
                e = sys.exc_info()[1]
                raise Exception("Operation not supported (non integer UID)")
            result = "UID %s:%s" % (lower_limit, threshold)
        elif name == "DATE":
            # strictly-after: shift the date one day forward for SINCE
            result = "SINCE %s" % self.convert_date(second, add=datetime.timedelta(1))
        elif name == "SIZE":
            result = "LARGER %s" % self.expand(second)
        else:
            raise Exception("Operation not supported")
        return result
6462
    def GE(self, first, second):
        # Translate "field >= value" into an IMAP SEARCH criterion.
        result = None
        name = self.search_fields[first.name]
        if name == "MESSAGE":
            # inclusive range from `second` to the last sequence number
            last_message = self.get_last_message(first.tablename)
            result = "%s:%s" % (self.expand(second), last_message)
        elif name == "UID":
            # GT and LT may not return
            # expected sets depending on
            # the uid format implemented
            try:
                pedestal, threshold = self.get_uid_bounds(first.tablename)
            except TypeError:
                e = sys.exc_info()[1]
                LOGGER.debug("Error requesting uid bounds: %s", str(e))
                return ""
            lower_limit = self.expand(second)
            result = "UID %s:%s" % (lower_limit, threshold)
        elif name == "DATE":
            # SINCE is inclusive, so no day shift here (unlike GT)
            result = "SINCE %s" % self.convert_date(second)
        else:
            raise Exception("Operation not supported")
        return result
6486
    def LT(self, first, second):
        # Translate "field < value" into an IMAP SEARCH criterion.
        result = None
        name = self.search_fields[first.name]
        if name == "MESSAGE":
            # sequence numbers: 1 up to (second - 1)
            result = "%s:%s" % (1, int(self.expand(second)) - 1)
        elif name == "UID":
            try:
                pedestal, threshold = self.get_uid_bounds(first.tablename)
            except TypeError:
                e = sys.exc_info()[1]
                LOGGER.debug("Error requesting uid bounds: %s", str(e))
                return ""
            try:
                upper_limit = int(self.expand(second)) - 1
            except (ValueError, TypeError):
                e = sys.exc_info()[1]
                raise Exception("Operation not supported (non integer UID)")
            result = "UID %s:%s" % (pedestal, upper_limit)
        elif name == "DATE":
            # BEFORE is strictly-before, which matches "<"
            result = "BEFORE %s" % self.convert_date(second)
        elif name == "SIZE":
            result = "SMALLER %s" % self.expand(second)
        else:
            raise Exception("Operation not supported")
        return result
6512
    def LE(self, first, second):
        # Translate "field <= value" into an IMAP SEARCH criterion.
        result = None
        name = self.search_fields[first.name]
        if name == "MESSAGE":
            # inclusive range 1..second
            result = "%s:%s" % (1, self.expand(second))
        elif name == "UID":
            try:
                pedestal, threshold = self.get_uid_bounds(first.tablename)
            except TypeError:
                e = sys.exc_info()[1]
                LOGGER.debug("Error requesting uid bounds: %s", str(e))
                return ""
            upper_limit = int(self.expand(second))
            result = "UID %s:%s" % (pedestal, upper_limit)
        elif name == "DATE":
            # BEFORE is strictly-before, so shift one day forward for "<="
            result = "BEFORE %s" % self.convert_date(second, add=datetime.timedelta(1))
        else:
            raise Exception("Operation not supported")
        return result
6532
6533 - def NE(self, first, second=None):
6534 if (second is None) and isinstance(first, Field): 6535 # All records special table query 6536 if first.type == "id": 6537 return self.GE(first, 1) 6538 result = self.NOT(self.EQ(first, second)) 6539 result = result.replace("NOT NOT", "").strip() 6540 return result
6541
    def EQ(self,first,second):
        # Translate "field == value" into an IMAP SEARCH criterion.
        name = self.search_fields[first.name]
        result = None
        if name is not None:
            if name == "MESSAGE":
                # query by message sequence number
                result = "%s" % self.expand(second)
            elif name == "UID":
                result = "UID %s" % self.expand(second)
            elif name == "DATE":
                result = "ON %s" % self.convert_date(second)

            elif name in self.flags:
                # boolean flag field; [1:] drops the flag's leading
                # backslash (e.g. "\\Seen" -> SEEN) for the SEARCH keyword
                if second:
                    result = "%s" % (name.upper()[1:])
                else:
                    result = "NOT %s" % (name.upper()[1:])
            else:
                raise Exception("Operation not supported")
        else:
            raise Exception("Operation not supported")
        return result
6564
6565 - def AND(self, first, second):
6566 result = "%s %s" % (self.expand(first), self.expand(second)) 6567 return result
6568
6569 - def OR(self, first, second):
6570 result = "OR %s %s" % (self.expand(first), self.expand(second)) 6571 return "%s" % result.replace("OR OR", "OR")
6572
6573 - def NOT(self, first):
6574 result = "NOT %s" % self.expand(first) 6575 return result
6576 6577 ######################################################################## 6578 # end of adapters 6579 ######################################################################## 6580 6581 ADAPTERS = { 6582 'sqlite': SQLiteAdapter, 6583 'spatialite': SpatiaLiteAdapter, 6584 'sqlite:memory': SQLiteAdapter, 6585 'spatialite:memory': SpatiaLiteAdapter, 6586 'mysql': MySQLAdapter, 6587 'postgres': PostgreSQLAdapter, 6588 'postgres:psycopg2': PostgreSQLAdapter, 6589 'postgres:pg8000': PostgreSQLAdapter, 6590 'postgres2:psycopg2': NewPostgreSQLAdapter, 6591 'postgres2:pg8000': NewPostgreSQLAdapter, 6592 'oracle': OracleAdapter, 6593 'mssql': MSSQLAdapter, 6594 'mssql2': MSSQL2Adapter, 6595 'mssql3': MSSQL3Adapter, 6596 'sybase': SybaseAdapter, 6597 'db2': DB2Adapter, 6598 'teradata': TeradataAdapter, 6599 'informix': InformixAdapter, 6600 'informix-se': InformixSEAdapter, 6601 'firebird': FireBirdAdapter, 6602 'firebird_embedded': FireBirdAdapter, 6603 'ingres': IngresAdapter, 6604 'ingresu': IngresUnicodeAdapter, 6605 'sapdb': SAPDBAdapter, 6606 'cubrid': CubridAdapter, 6607 'jdbc:sqlite': JDBCSQLiteAdapter, 6608 'jdbc:sqlite:memory': JDBCSQLiteAdapter, 6609 'jdbc:postgres': JDBCPostgreSQLAdapter, 6610 'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility 6611 'google:datastore': GoogleDatastoreAdapter, 6612 'google:sql': GoogleSQLAdapter, 6613 'couchdb': CouchDBAdapter, 6614 'mongodb': MongoDBAdapter, 6615 'imap': IMAPAdapter 6616 }
def sqlhtml_validators(field):
    """
    Field type validation, using web2py's validators mechanism.

    makes sure the content of a field is in line with the declared
    fieldtype

    Returns a list of validators (or a single IS_IN_DB / IS_EMPTY_OR
    for reference types); returns [] when validators are unavailable
    or the type is not a recognized string/SQLCustomType.
    """
    db = field.db
    if not have_validators:
        # validators module could not be imported; nothing to attach
        return []
    field_type, field_length = field.type, field.length
    if isinstance(field_type, SQLCustomType):
        # custom types may carry their own validator, else fall back
        # to the native type they wrap
        if hasattr(field_type, 'validator'):
            return field_type.validator
        else:
            field_type = field_type.type
    elif not isinstance(field_type,str):
        return []
    requires=[]
    def ff(r,id):
        # represent a referenced record: use the table's _format
        # (string template or callable) when available, else the raw id
        row=r(id)
        if not row:
            return id
        elif hasattr(r, '_format') and isinstance(r._format,str):
            return r._format % row
        elif hasattr(r, '_format') and callable(r._format):
            return r._format(row)
        else:
            return id
    if field_type in (('string', 'text', 'password')):
        requires.append(validators.IS_LENGTH(field_length))
    elif field_type == 'json':
        requires.append(validators.IS_EMPTY_OR(validators.IS_JSON()))
    elif field_type == 'double' or field_type == 'float':
        requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))
    elif field_type in ('integer','bigint'):
        requires.append(validators.IS_INT_IN_RANGE(-1e100, 1e100))
    elif field_type.startswith('decimal'):
        requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10))
    elif field_type == 'date':
        requires.append(validators.IS_DATE())
    elif field_type == 'time':
        requires.append(validators.IS_TIME())
    elif field_type == 'datetime':
        requires.append(validators.IS_DATETIME())
    elif db and field_type.startswith('reference') and \
            field_type.find('.') < 0 and \
            field_type[10:] in db.tables:
        # 'reference <table>' pointing at a defined table (no dotted form)
        referenced = db[field_type[10:]]
        def repr_ref(id, row=None, r=referenced, f=ff): return f(r, id)
        field.represent = field.represent or repr_ref
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           referenced._format)
            if field.unique:
                requires._and = validators.IS_NOT_IN_DB(db,field)
            if field.tablename == field_type[10:]:
                # self-reference: allow empty to break the cycle
                return validators.IS_EMPTY_OR(requires)
            return requires
    elif db and field_type.startswith('list:reference') and \
            field_type.find('.') < 0 and \
            field_type[15:] in db.tables:
        # 'list:reference <table>' pointing at a defined table
        referenced = db[field_type[15:]]
        def list_ref_repr(ids, row=None, r=referenced, f=ff):
            if not ids:
                return None
            refs = None
            db, id = r._db, r._id
            if isinstance(db._adapter, GoogleDatastoreAdapter):
                # GAE limits belongs() to 30 items per query: batch and merge
                def count(values): return db(id.belongs(values)).select(id)
                rx = range(0, len(ids), 30)
                refs = reduce(lambda a,b:a&b, [count(ids[i:i+30]) for i in rx])
            else:
                refs = db(id.belongs(ids)).select(id)
            return (refs and ', '.join(str(f(r,x.id)) for x in refs) or '')
        field.represent = field.represent or list_ref_repr
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           referenced._format,multiple=True)
        else:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           multiple=True)
        if field.unique:
            requires._and = validators.IS_NOT_IN_DB(db,field)
        return requires
    elif field_type.startswith('list:'):
        def repr_list(values,row=None): return ', '.join(str(v) for v in (values or []))
        field.represent = field.represent or repr_list
    if field.unique:
        requires.insert(0,validators.IS_NOT_IN_DB(db,field))
    # sff: two-letter prefixes of types that may legitimately be empty
    # (integer, double, date, time, decimal/datetime, boolean/bigint)
    sff = ['in', 'do', 'da', 'ti', 'de', 'bo']
    if field.notnull and not field_type[:2] in sff:
        requires.insert(0, validators.IS_NOT_EMPTY())
    elif not field.notnull and field_type[:2] in sff and requires:
        requires[-1] = validators.IS_EMPTY_OR(requires[-1])
    return requires
def bar_escape(item):
    """Escape literal '|' characters by doubling them, for bar-encoded lists."""
    text = str(item)
    return text.replace('|', '||')
6719
def bar_encode(items):
    """
    Serialize items as a '|'-delimited string (leading and trailing bar),
    escaping embedded '|' by doubling it and skipping blank items.
    """
    kept = [str(item).replace('|', '||')
            for item in items if str(item).strip()]
    return '|%s|' % '|'.join(kept)
6722
def bar_decode_integer(value):
    """
    Parse a bar-encoded string (or a file-like object exposing read())
    into a list of ints, ignoring empty segments.
    """
    if not hasattr(value, 'split') and hasattr(value, 'read'):
        # blob-like input: materialize the string first
        value = value.read()
    return [int(chunk) for chunk in value.split('|') if chunk.strip()]
6727
def bar_decode_string(value):
    """
    Decode a bar-encoded string back into its items: strip the outer
    bars, split on unescaped '|', and un-double the escaped bars.
    """
    inner = value[1:-1]
    return [chunk.replace('||', '|')
            for chunk in REGEX_UNPACK.split(inner) if chunk.strip()]
6731
6732 6733 -class Row(object):
6734 6735 """ 6736 a dictionary that lets you do d['a'] as well as d.a 6737 this is only used to store a Row 6738 """ 6739
6740 - def __init__(self,*args,**kwargs):
6741 self.__dict__.update(*args,**kwargs)
6742
6743 - def __getitem__(self, key):
6744 key=str(key) 6745 m = REGEX_TABLE_DOT_FIELD.match(key) 6746 if key in self.get('_extra',{}): 6747 return self._extra[key] 6748 elif m: 6749 try: 6750 return ogetattr(self, m.group(1))[m.group(2)] 6751 except (KeyError,AttributeError,TypeError): 6752 key = m.group(2) 6753 return ogetattr(self, key)
6754
6755 - def __setitem__(self, key, value):
6756 setattr(self, str(key), value)
6757 6758 __delitem__ = delattr 6759 6760 __copy__ = lambda self: Row(self) 6761 6762 __call__ = __getitem__ 6763
6764 - def get(self,key,default=None):
6765 return self.__dict__.get(key,default)
6766
6767 - def __contains__(self,key):
6768 return key in self.__dict__
6769 6770 has_key = __contains__ 6771
6772 - def __nonzero__(self):
6773 return len(self.__dict__)>0
6774
6775 - def update(self, *args, **kwargs):
6776 self.__dict__.update(*args, **kwargs)
6777
6778 - def keys(self):
6779 return self.__dict__.keys()
6780
6781 - def items(self):
6782 return self.__dict__.items()
6783
6784 - def values(self):
6785 return self.__dict__.values()
6786
6787 - def __iter__(self):
6788 return self.__dict__.__iter__()
6789
6790 - def iteritems(self):
6791 return self.__dict__.iteritems()
6792
6793 - def __str__(self):
6794 ### this could be made smarter 6795 return '<Row %s>' % self.as_dict()
6796
6797 - def __repr__(self):
6798 return '<Row %s>' % self.as_dict()
6799
6800 - def __int__(self):
6801 return object.__getattribute__(self,'id')
6802
6803 - def __eq__(self,other):
6804 try: 6805 return self.as_dict() == other.as_dict() 6806 except AttributeError: 6807 return False
6808
6809 - def __ne__(self,other):
6810 return not (self == other)
6811
6812 - def __copy__(self):
6813 return Row(dict(self))
6814
6815 - def as_dict(self, datetime_to_str=False, custom_types=None):
6816 SERIALIZABLE_TYPES = [str, unicode, int, long, float, bool, list, dict] 6817 if isinstance(custom_types,(list,tuple,set)): 6818 SERIALIZABLE_TYPES += custom_types 6819 elif custom_types: 6820 SERIALIZABLE_TYPES.append(custom_types) 6821 d = dict(self) 6822 for k in copy.copy(d.keys()): 6823 v=d[k] 6824 if d[k] is None: 6825 continue 6826 elif isinstance(v,Row): 6827 d[k]=v.as_dict() 6828 elif isinstance(v,Reference): 6829 d[k]=int(v) 6830 elif isinstance(v,decimal.Decimal): 6831 d[k]=float(v) 6832 elif isinstance(v, (datetime.date, datetime.datetime, datetime.time)): 6833 if datetime_to_str: 6834 d[k] = v.isoformat().replace('T',' ')[:19] 6835 elif not isinstance(v,tuple(SERIALIZABLE_TYPES)): 6836 del d[k] 6837 return d
6838
6839 - def as_xml(self, row_name="row", colnames=None, indent=' '):
6840 def f(row,field,indent=' '): 6841 if isinstance(row,Row): 6842 spc = indent+' \n' 6843 items = [f(row[x],x,indent+' ') for x in row] 6844 return '%s<%s>\n%s\n%s</%s>' % ( 6845 indent, 6846 field, 6847 spc.join(item for item in items if item), 6848 indent, 6849 field) 6850 elif not callable(row): 6851 if REGEX_ALPHANUMERIC.match(field): 6852 return '%s<%s>%s</%s>' % (indent,field,row,field) 6853 else: 6854 return '%s<extra name="%s">%s</extra>' % \ 6855 (indent,field,row) 6856 else: 6857 return None
6858 return f(self, row_name, indent=indent)
6859
6860 - def as_json(self, mode="object", default=None, colnames=None, 6861 serialize=True, **kwargs):
6862 """ 6863 serializes the table to a JSON list of objects 6864 kwargs are passed to .as_dict method 6865 only "object" mode supported for single row 6866 6867 serialize = False used by Rows.as_json 6868 TODO: return array mode with query column order 6869 """ 6870 6871 def inner_loop(record, col): 6872 (t, f) = col.split('.') 6873 res = None 6874 if not REGEX_TABLE_DOT_FIELD.match(col): 6875 key = col 6876 res = record._extra[col] 6877 else: 6878 key = f 6879 if isinstance(record.get(t, None), Row): 6880 res = record[t][f] 6881 else: 6882 res = record[f] 6883 if mode == 'object': 6884 return (key, res) 6885 else: 6886 return res
6887 6888 multi = any([isinstance(v, self.__class__) for v in self.values()]) 6889 mode = mode.lower() 6890 if not mode in ['object', 'array']: 6891 raise SyntaxError('Invalid JSON serialization mode: %s' % mode) 6892 6893 if mode=='object' and colnames: 6894 item = dict([inner_loop(self, col) for col in colnames]) 6895 elif colnames: 6896 item = [inner_loop(self, col) for col in colnames] 6897 else: 6898 if not mode == 'object': 6899 raise SyntaxError('Invalid JSON serialization mode: %s' % mode) 6900 6901 if multi: 6902 item = dict() 6903 [item.update(**v.as_dict(**kwargs)) for v in self.values()] 6904 else: 6905 item = self.as_dict(**kwargs) 6906 6907 if serialize: 6908 if have_serializers: 6909 return serializers.json(item, 6910 default=default or 6911 serializers.custom_json) 6912 elif simplejson: 6913 return simplejson.dumps(item) 6914 else: 6915 raise RuntimeError("missing simplejson") 6916 else: 6917 return item 6918
################################################################################
# Everything below should be independent of the specifics of the database
# and should work for RDBMs and some NoSQL databases
################################################################################

class SQLCallableList(list):
    # A list that is also callable: db.tables behaves like a list,
    # while db.tables() returns a shallow copy of it.
    def __call__(self):
        return copy.copy(self)
6928
def smart_query(fields,text):
    """
    Parse a human-readable query string (e.g. "name equals 'John' and
    age greater than 18") against the given fields/tables and return
    the corresponding DAL Query object (or None for empty input).
    Raises RuntimeError on unknown fields, bad syntax, or unsupported
    operations.
    """
    if not isinstance(fields,(list,tuple)):
        fields = [fields]
    # flatten Tables into their Fields
    new_fields = []
    for field in fields:
        if isinstance(field,Field):
            new_fields.append(field)
        elif isinstance(field,Table):
            for ofield in field:
                new_fields.append(ofield)
        else:
            raise RuntimeError("fields must be a list of fields")
    fields = new_fields
    # map both 'fieldname' and 'table.fieldname' (lowercased) to the Field
    field_map = {}
    for field in fields:
        n = field.name.lower()
        if not n in field_map:
            field_map[n] = field
        n = str(field).lower()
        if not n in field_map:
            field_map[n] = field
    # pull quoted string constants out of the text, replacing each with
    # a '#<index>' placeholder so later rewriting cannot corrupt them
    constants = {}
    i = 0
    while True:
        m = REGEX_CONST_STRING.search(text)
        if not m: break
        text = text[:m.start()]+('#%i' % i)+text[m.end():]
        constants[str(i)] = m.group()[1:-1]
        i+=1
    text = re.sub('\s+',' ',text).lower()
    # normalize operator spellings (symbols and English phrases) to a
    # canonical token; longest phrases appear before their prefixes
    for a,b in [('&','and'),
                ('|','or'),
                ('~','not'),
                ('==','='),
                ('<','<'),
                ('>','>'),
                ('<=','<='),
                ('>=','>='),
                ('<>','!='),
                ('=<','<='),
                ('=>','>='),
                ('=','='),
                (' less or equal than ','<='),
                (' greater or equal than ','>='),
                (' equal or less than ','<='),
                (' equal or greater than ','>='),
                (' less or equal ','<='),
                (' greater or equal ','>='),
                (' equal or less ','<='),
                (' equal or greater ','>='),
                (' not equal to ','!='),
                (' not equal ','!='),
                (' equal to ','='),
                (' equal ','='),
                (' equals ','='),
                (' less than ','<'),
                (' greater than ','>'),
                (' starts with ','startswith'),
                (' ends with ','endswith'),
                (' not in ' , 'notbelongs'),
                (' in ' , 'belongs'),
                (' is ','=')]:
        if a[0]==' ':
            # also accept "is <phrase>" forms, e.g. "is equal to"
            text = text.replace(' is'+a,' %s ' % b)
        text = text.replace(a,' %s ' % b)
    text = re.sub('\s+',' ',text).lower()
    # glue split two-character operators back together (e.g. "< =" -> "<=")
    text = re.sub('(?P<a>[\<\>\!\=])\s+(?P<b>[\<\>\!\=])','\g<a>\g<b>',text)
    # simple state machine: expect field, then operator, then value
    query = field = neg = op = logic = None
    for item in text.split():
        if field is None:
            if item == 'not':
                neg = True
            elif not neg and not logic and item in ('and','or'):
                logic = item
            elif item in field_map:
                field = field_map[item]
            else:
                raise RuntimeError("Invalid syntax")
        elif not field is None and op is None:
            op = item
        elif not op is None:
            if item.startswith('#'):
                # placeholder: restore the original quoted constant
                if not item[1:] in constants:
                    raise RuntimeError("Invalid syntax")
                value = constants[item[1:]]
            else:
                value = item
                if field.type in ('text', 'string', 'json'):
                    # unquoted equality on text fields becomes LIKE
                    if op == '=': op = 'like'
            if op == '=': new_query = field==value
            elif op == '<': new_query = field<value
            elif op == '>': new_query = field>value
            elif op == '<=': new_query = field<=value
            elif op == '>=': new_query = field>=value
            elif op == '!=': new_query = field!=value
            elif op == 'belongs': new_query = field.belongs(value.split(','))
            elif op == 'notbelongs': new_query = ~field.belongs(value.split(','))
            elif field.type in ('text', 'string', 'json'):
                if op == 'contains': new_query = field.contains(value)
                elif op == 'like': new_query = field.like(value)
                elif op == 'startswith': new_query = field.startswith(value)
                elif op == 'endswith': new_query = field.endswith(value)
                else: raise RuntimeError("Invalid operation")
            elif field._db._adapter.dbengine=='google:datastore' and \
                 field.type in ('list:integer', 'list:string', 'list:reference'):
                if op == 'contains': new_query = field.contains(value)
                else: raise RuntimeError("Invalid operation")
            else: raise RuntimeError("Invalid operation")
            if neg: new_query = ~new_query
            # combine with the accumulated query and reset the state
            if query is None:
                query = new_query
            elif logic == 'and':
                query &= new_query
            elif logic == 'or':
                query |= new_query
            field = op = neg = logic = None
    return query
7046
7047 -class DAL(object):
7048 7049 """ 7050 an instance of this class represents a database connection 7051 7052 Example:: 7053 7054 db = DAL('sqlite://test.db') 7055 db.define_table('tablename', Field('fieldname1'), 7056 Field('fieldname2')) 7057 7058 (experimental) 7059 you can pass a dict object as uri with the uri string 7060 and table/field definitions. For an example of valid data check 7061 the output of: 7062 7063 >>> db.as_dict(flat=True, sanitize=False) 7064 """ 7065
    def __new__(cls, uri='sqlite://dummy.db', *args, **kwargs):
        # Per-thread instance bookkeeping: instances are grouped by a
        # db_uid (hash of the uri unless given) so '<zombie>' placeholder
        # connections created early in a request can later be reused by
        # the real connection with the same uid.
        if not hasattr(THREAD_LOCAL,'db_instances'):
            THREAD_LOCAL.db_instances = {}
        if not hasattr(THREAD_LOCAL,'db_instances_zombie'):
            THREAD_LOCAL.db_instances_zombie = {}
        if uri == '<zombie>':
            db_uid = kwargs['db_uid'] # a zombie must have a db_uid!
            if db_uid in THREAD_LOCAL.db_instances:
                # reuse the most recent live instance for this uid
                db_group = THREAD_LOCAL.db_instances[db_uid]
                db = db_group[-1]
            elif db_uid in THREAD_LOCAL.db_instances_zombie:
                db = THREAD_LOCAL.db_instances_zombie[db_uid]
            else:
                db = super(DAL, cls).__new__(cls)
                THREAD_LOCAL.db_instances_zombie[db_uid] = db
        else:
            db_uid = kwargs.get('db_uid',hashlib_md5(repr(uri)).hexdigest())
            if db_uid in THREAD_LOCAL.db_instances_zombie:
                # promote a matching zombie to a live instance
                db = THREAD_LOCAL.db_instances_zombie[db_uid]
                del THREAD_LOCAL.db_instances_zombie[db_uid]
            else:
                db = super(DAL, cls).__new__(cls)
            db_group = THREAD_LOCAL.db_instances.get(db_uid,[])
            db_group.append(db)
            THREAD_LOCAL.db_instances[db_uid] = db_group
        db._db_uid = db_uid
        return db
    @staticmethod
    def set_folder(folder):
        """
        # ## this allows gluon to set a folder for this thread
        # ## <<<<<<<<< Should go away as new DAL replaces old sql.py

        Delegates to BaseAdapter.set_folder, affecting where .table
        migration files are read/written for this thread.
        """
        BaseAdapter.set_folder(folder)
7101 7102 @staticmethod
7103 - def get_instances():
7104 """ 7105 Returns a dictionary with uri as key with timings and defined tables 7106 {'sqlite://storage.sqlite': { 7107 'dbstats': [(select auth_user.email from auth_user, 0.02009)], 7108 'dbtables': { 7109 'defined': ['auth_cas', 'auth_event', 'auth_group', 7110 'auth_membership', 'auth_permission', 'auth_user'], 7111 'lazy': '[]' 7112 } 7113 } 7114 } 7115 """ 7116 dbs = getattr(THREAD_LOCAL,'db_instances',{}).items() 7117 infos = {} 7118 for db_uid, db_group in dbs: 7119 for db in db_group: 7120 if not db._uri: 7121 continue 7122 k = hide_password(db._uri) 7123 infos[k] = dict(dbstats = [(row[0], row[1]) for row in db._timings], 7124 dbtables = {'defined': 7125 sorted(list(set(db.tables) - 7126 set(db._LAZY_TABLES.keys()))), 7127 'lazy': sorted(db._LAZY_TABLES.keys())} 7128 ) 7129 return infos
7130 7131 @staticmethod
7132 - def distributed_transaction_begin(*instances):
7133 if not instances: 7134 return 7135 thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) 7136 keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] 7137 instances = enumerate(instances) 7138 for (i, db) in instances: 7139 if not db._adapter.support_distributed_transaction(): 7140 raise SyntaxError( 7141 'distributed transaction not suported by %s' % db._dbname) 7142 for (i, db) in instances: 7143 db._adapter.distributed_transaction_begin(keys[i])
7144 7145 @staticmethod
7146 - def distributed_transaction_commit(*instances):
7147 if not instances: 7148 return 7149 instances = enumerate(instances) 7150 thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) 7151 keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] 7152 for (i, db) in instances: 7153 if not db._adapter.support_distributed_transaction(): 7154 raise SyntaxError( 7155 'distributed transaction not suported by %s' % db._dbanme) 7156 try: 7157 for (i, db) in instances: 7158 db._adapter.prepare(keys[i]) 7159 except: 7160 for (i, db) in instances: 7161 db._adapter.rollback_prepared(keys[i]) 7162 raise RuntimeError('failure to commit distributed transaction') 7163 else: 7164 for (i, db) in instances: 7165 db._adapter.commit_prepared(keys[i]) 7166 return
7167
    def __init__(self, uri=DEFAULT_URI,
                 pool_size=0, folder=None,
                 db_codec='UTF-8', check_reserved=None,
                 migrate=True, fake_migrate=False,
                 migrate_enabled=True, fake_migrate_all=False,
                 decode_credentials=False, driver_args=None,
                 adapter_args=None, attempts=5, auto_import=False,
                 bigint_id=False,debug=False,lazy_tables=False,
                 db_uid=None, do_connect=True, after_connection=None):
        """
        Creates a new Database Abstraction Layer instance.

        Keyword arguments:

        :uri: string that contains information for connecting to a database.
               (default: 'sqlite://dummy.db')
        :pool_size: How many open connections to make to the database object.
        :folder: where .table files will be created.
                 automatically set within web2py
                 use an explicit path when using DAL outside web2py
        :db_codec: string encoding of the database (default: 'UTF-8')
        :check_reserved: list of adapters to check tablenames and column names
                         against sql/nosql reserved keywords. (Default None)

        * 'common' List of sql keywords that are common to all database types
                such as "SELECT, INSERT". (recommended)
        * 'all' Checks against all known SQL keywords. (not recommended)
                <adaptername> Checks against the specific adapters list of keywords
                (recommended)
        * '<adaptername>_nonreserved' Checks against the specific adapters
                list of nonreserved keywords. (if available)
        :migrate (defaults to True) sets default migrate behavior for all tables
        :fake_migrate (defaults to False) sets default fake_migrate behavior for all tables
        :migrate_enabled (defaults to True). If set to False disables ALL migrations
        :fake_migrate_all (defaults to False). If sets to True fake migrates ALL tables
        :attempts (defaults to 5). Number of times to attempt connecting
        :auto_import (defaults to False). If set, import automatically table definitions from the
                 databases folder
        :bigint_id (defaults to False): If set, turn on bigint instead of int for id fields
        :lazy_tables (defaults to False): delay table definition until table access
        :after_connection (defaults to None): a callable that will be execute after the connection
        """

        # dict-form uri: may embed table definitions under "items" and the
        # connection string under "uri"
        items = None
        if isinstance(uri, dict):
            if "items" in uri:
                items = uri.pop("items")
            try:
                newuri = uri.pop("uri")
            except KeyError:
                newuri = DEFAULT_URI
            # NOTE(review): locals().update() does not rebind local
            # variables in CPython -- the remaining uri dict keys are
            # effectively ignored here; confirm intent.
            locals().update(uri)
            uri = newuri

        # a zombie instance recycled by __new__ is already initialized
        if uri == '<zombie>' and db_uid is not None: return
        if not decode_credentials:
            credential_decoder = lambda cred: cred
        else:
            credential_decoder = lambda cred: urllib.unquote(cred)
        self._folder = folder
        if folder:
            self.set_folder(folder)
        self._uri = uri
        self._pool_size = pool_size
        self._db_codec = db_codec
        self._lastsql = ''
        self._timings = []
        self._pending_references = {}
        self._request_tenant = 'request_tenant'
        self._common_fields = []
        self._referee_name = '%(table)s'
        self._bigint_id = bigint_id
        self._debug = debug
        self._migrated = []
        self._LAZY_TABLES = {}
        self._lazy_tables = lazy_tables
        self._tables = SQLCallableList()
        self._driver_args = driver_args
        self._adapter_args = adapter_args
        self._check_reserved = check_reserved
        self._decode_credentials = decode_credentials
        self._attempts = attempts
        self._do_connect = do_connect

        if not str(attempts).isdigit() or attempts < 0:
            attempts = 5
        if uri:
            # uri may be a list/tuple of alternatives; retry the whole set
            # up to `attempts` times, sleeping 1s between rounds
            uris = isinstance(uri,(list,tuple)) and uri or [uri]
            error = ''
            connected = False
            for k in range(attempts):
                for uri in uris:
                    try:
                        if is_jdbc and not uri.startswith('jdbc:'):
                            uri = 'jdbc:'+uri
                        self._dbname = REGEX_DBNAME.match(uri).group()
                        if not self._dbname in ADAPTERS:
                            raise SyntaxError("Error in URI '%s' or database not supported" % self._dbname)
                        # notice that driver args or {} else driver_args
                        # defaults to {} global, not correct
                        kwargs = dict(db=self,uri=uri,
                                      pool_size=pool_size,
                                      folder=folder,
                                      db_codec=db_codec,
                                      credential_decoder=credential_decoder,
                                      driver_args=driver_args or {},
                                      adapter_args=adapter_args or {},
                                      do_connect=do_connect,
                                      after_connection=after_connection)
                        self._adapter = ADAPTERS[self._dbname](**kwargs)
                        types = ADAPTERS[self._dbname].types
                        # copy so multiple DAL() possible
                        self._adapter.types = copy.copy(types)
                        if bigint_id:
                            if 'big-id' in types and 'reference' in types:
                                self._adapter.types['id'] = types['big-id']
                                self._adapter.types['reference'] = types['big-reference']
                        connected = True
                        break
                    except SyntaxError:
                        # bad uri / unsupported db: do not retry
                        raise
                    except Exception:
                        tb = traceback.format_exc()
                        sys.stderr.write('DEBUG: connect attempt %i, connection error:\n%s' % (k, tb))
                if connected:
                    break
                else:
                    time.sleep(1)
            if not connected:
                raise RuntimeError("Failure to connect, tried %d times:\n%s" % (attempts, tb))
        else:
            # no uri: dummy in-memory adapter, migrations disabled
            self._adapter = BaseAdapter(db=self,pool_size=0,
                                        uri='None',folder=folder,
                                        db_codec=db_codec, after_connection=after_connection)
            migrate = fake_migrate = False
        adapter = self._adapter
        self._uri_hash = hashlib_md5(adapter.uri).hexdigest()
        self.check_reserved = check_reserved
        if self.check_reserved:
            from reserved_sql_keywords import ADAPTERS as RSK
            self.RSK = RSK
        self._migrate = migrate
        self._fake_migrate = fake_migrate
        self._migrate_enabled = migrate_enabled
        self._fake_migrate_all = fake_migrate_all
        if auto_import or items:
            self.import_table_definitions(adapter.folder,
                                          items=items)
    @property
    def tables(self):
        # Read-only view of the defined table names (a callable list:
        # db.tables iterates, db.tables() returns a copy).
        return self._tables
7320
    def import_table_definitions(self, path, migrate=False,
                                 fake_migrate=False, items=None):
        """
        Define tables either from an `items` dict (experimental dict-uri
        form) or by loading the pickled .table migration files found in
        `path` for this connection's uri hash.
        """
        pattern = pjoin(path,self._uri_hash+'_*.table')
        if items:
            for tablename, table in items.iteritems():
                # TODO: read all field/table options
                fields = []
                # remove unsupported/illegal Table arguments
                [table.pop(name) for name in ("name", "fields") if
                 name in table]
                if "items" in table:
                    for fieldname, field in table.pop("items").iteritems():
                        # remove unsupported/illegal Field arguments
                        [field.pop(key) for key in ("requires", "name",
                         "compute", "colname") if key in field]
                        fields.append(Field(str(fieldname), **field))
                self.define_table(str(tablename), *fields, **table)
        else:
            for filename in glob.glob(pattern):
                tfile = self._adapter.file_open(filename, 'r')
                try:
                    # NOTE(review): pickle.load on .table files -- these are
                    # trusted local migration files, not external input;
                    # confirm the databases folder is not user-writable.
                    sql_fields = pickle.load(tfile)
                    # table name is the part of the filename matched by '*'
                    name = filename[len(pattern)-7:-6]
                    # rebuild fields ordered by their stored 'sortable' rank
                    mf = [(value['sortable'],
                           Field(key,
                                 type=value['type'],
                                 length=value.get('length',None),
                                 notnull=value.get('notnull',False),
                                 unique=value.get('unique',False))) \
                          for key, value in sql_fields.iteritems()]
                    mf.sort(lambda a,b: cmp(a[0],b[0]))
                    self.define_table(name,*[item[1] for item in mf],
                                      **dict(migrate=migrate,
                                             fake_migrate=fake_migrate))
                finally:
                    self._adapter.file_close(tfile)
7357
7358 - def check_reserved_keyword(self, name):
7359 """ 7360 Validates ``name`` against SQL keywords 7361 Uses self.check_reserve which is a list of 7362 operators to use. 7363 self.check_reserved 7364 ['common', 'postgres', 'mysql'] 7365 self.check_reserved 7366 ['all'] 7367 """ 7368 for backend in self.check_reserved: 7369 if name.upper() in self.RSK[backend]: 7370 raise SyntaxError( 7371 'invalid table/column name "%s" is a "%s" reserved SQL/NOSQL keyword' % (name, backend.upper()))
7372
    def parse_as_rest(self,patterns,args,vars,queries=None,nested_select=True):
        """
        Match a RESTful URL (``args``) against a list of URL ``patterns``
        and return a Row with status/response/error/pattern keys.

        EXAMPLE:

        db.define_table('person',Field('name'),Field('info'))
        db.define_table('pet',Field('ownedby',db.person),Field('name'),Field('info'))

        @request.restful()
        def index():
            def GET(*args,**vars):
                patterns = [
                    "/friends[person]",
                    "/{person.name}/:field",
                    "/{person.name}/pets[pet.ownedby]",
                    "/{person.name}/pets[pet.ownedby]/{pet.name}",
                    "/{person.name}/pets[pet.ownedby]/{pet.name}/:field",
                    ("/dogs[pet]", db.pet.info=='dog'),
                    ("/dogs[pet]/{pet.name.startswith}", db.pet.info=='dog'),
                    ]
                parser = db.parse_as_rest(patterns,args,vars)
                if parser.status == 200:
                    return dict(content=parser.response)
                else:
                    raise HTTP(parser.status,parser.error)

            def POST(table_name,**vars):
                if table_name == 'person':
                    return db.person.validate_and_insert(**vars)
                elif table_name == 'pet':
                    return db.pet.validate_and_insert(**vars)
                else:
                    raise HTTP(400)
            return locals()
        """

        db = self
        re1 = REGEX_SEARCH_PATTERN
        re2 = REGEX_SQUARE_BRACKETS

        def auto_table(table,base='',depth=0):
            # generate the standard set of URL patterns for one table,
            # recursing through back-references up to `depth` levels
            patterns = []
            for field in db[table].fields:
                if base:
                    tag = '%s/%s' % (base,field.replace('_','-'))
                else:
                    tag = '/%s/%s' % (table.replace('_','-'),field.replace('_','-'))
                f = db[table][field]
                if not f.readable: continue
                if f.type=='id' or 'slug' in field or f.type.startswith('reference'):
                    tag += '/{%s.%s}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type.startswith('boolean'):
                    tag += '/{%s.%s}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type in ('float','double','integer','bigint'):
                    tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table,field,table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type.startswith('list:'):
                    tag += '/{%s.%s.contains}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                elif f.type in ('date','datetime'):
                    tag+= '/{%s.%s.year}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.month}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.day}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                if f.type in ('datetime','time'):
                    tag+= '/{%s.%s.hour}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.minute}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
                    tag+='/{%s.%s.second}' % (table,field)
                    patterns.append(tag)
                    patterns.append(tag+'/:field')
            if depth>0:
                for f in db[table]._referenced_by:
                    tag+='/%s[%s.%s]' % (table,f.tablename,f.name)
                    patterns.append(tag)
                    patterns += auto_table(table,base=tag,depth=depth-1)
            return patterns

        if patterns == 'auto':
            # build patterns automatically for every non-auth table
            patterns=[]
            for table in db.tables:
                if not table.startswith('auth_'):
                    patterns.append('/%s[%s]' % (table,table))
                    patterns += auto_table(table,base='',depth=1)
        else:
            # expand any trailing ':auto[table]' pseudo-pattern in place
            i = 0
            while i<len(patterns):
                pattern = patterns[i]
                if not isinstance(pattern,str):
                    pattern = pattern[0]
                tokens = pattern.split('/')
                if tokens[-1].startswith(':auto') and re2.match(tokens[-1]):
                    new_patterns = auto_table(tokens[-1][tokens[-1].find('[')+1:-1],
                                              '/'.join(tokens[:-1]))
                    patterns = patterns[:i]+new_patterns+patterns[i+1:]
                    i += len(new_patterns)
                else:
                    i += 1
        if '/'.join(args) == 'patterns':
            return Row({'status':200,'pattern':'list',
                        'error':None,'response':patterns})
        for pattern in patterns:
            basequery, exposedfields = None, []
            if isinstance(pattern,tuple):
                if len(pattern)==2:
                    pattern, basequery = pattern
                elif len(pattern)>2:
                    pattern, basequery, exposedfields = pattern[0:3]
            otable=table=None
            if not isinstance(queries,dict):
                dbset=db(queries)
                if basequery is not None:
                    dbset = dbset(basequery)
            i=0
            tags = pattern[1:].split('/')
            # pattern only applies if it has exactly as many segments as the URL
            if len(tags)!=len(args):
                continue
            for tag in tags:
                if re1.match(tag):
                    # '{table.field.op}' segment: narrow the set by a filter
                    tokens = tag[1:-1].split('.')
                    table, field = tokens[0], tokens[1]
                    if not otable or table == otable:
                        if len(tokens)==2 or tokens[2]=='eq':
                            query = db[table][field]==args[i]
                        elif tokens[2]=='ne':
                            query = db[table][field]!=args[i]
                        elif tokens[2]=='lt':
                            query = db[table][field]<args[i]
                        elif tokens[2]=='gt':
                            query = db[table][field]>args[i]
                        elif tokens[2]=='ge':
                            query = db[table][field]>=args[i]
                        elif tokens[2]=='le':
                            query = db[table][field]<=args[i]
                        elif tokens[2]=='year':
                            query = db[table][field].year()==args[i]
                        elif tokens[2]=='month':
                            query = db[table][field].month()==args[i]
                        elif tokens[2]=='day':
                            query = db[table][field].day()==args[i]
                        elif tokens[2]=='hour':
                            query = db[table][field].hour()==args[i]
                        elif tokens[2]=='minute':
                            # NOTE(review): Field exposes minutes()/seconds()
                            # (plural) unlike year()/month()/day() -- confirm
                            query = db[table][field].minutes()==args[i]
                        elif tokens[2]=='second':
                            query = db[table][field].seconds()==args[i]
                        elif tokens[2]=='startswith':
                            query = db[table][field].startswith(args[i])
                        elif tokens[2]=='contains':
                            query = db[table][field].contains(args[i])
                        else:
                            raise RuntimeError("invalid pattern: %s" % pattern)
                        if len(tokens)==4 and tokens[3]=='not':
                            query = ~query
                        elif len(tokens)>=4:
                            raise RuntimeError("invalid pattern: %s" % pattern)
                        if not otable and isinstance(queries,dict):
                            dbset = db(queries[table])
                            if basequery is not None:
                                dbset = dbset(basequery)
                        dbset=dbset(query)
                    else:
                        raise RuntimeError("missing relation in pattern: %s" % pattern)
                elif re2.match(tag) and args[i]==tag[:tag.find('[')]:
                    # 'name[table.field]' segment: hop to a related table
                    ref = tag[tag.find('[')+1:-1]
                    if '.' in ref and otable:
                        table,field = ref.split('.')
                        selfld = '_id'
                        if db[table][field].type.startswith('reference '):
                            refs = [ x.name for x in db[otable] if x.type == db[table][field].type ]
                        else:
                            refs = [ x.name for x in db[table]._referenced_by if x.tablename==otable ]
                        if refs:
                            selfld = refs[0]
                        if nested_select:
                            try:
                                dbset=db(db[table][field].belongs(dbset._select(db[otable][selfld])))
                            except ValueError:
                                return Row({'status':400,'pattern':pattern,
                                            'error':'invalid path','response':None})
                        else:
                            items = [item.id for item in dbset.select(db[otable][selfld])]
                            dbset=db(db[table][field].belongs(items))
                    else:
                        table = ref
                        if not otable and isinstance(queries,dict):
                            dbset = db(queries[table])
                        dbset=dbset(db[table])
                elif tag==':field' and table:
                    # ':field' segment: return the values of one column
                    field = args[i]
                    if not field in db[table]: break
                    # hand-built patterns should respect .readable=False as well
                    if not db[table][field].readable:
                        return Row({'status':418,'pattern':pattern,
                                    'error':'I\'m a teapot','response':None})
                    try:
                        distinct = vars.get('distinct', False) == 'True'
                        offset = int(vars.get('offset',None) or 0)
                        limits = (offset,int(vars.get('limit',None) or 1000)+offset)
                    except ValueError:
                        return Row({'status':400,'error':'invalid limits','response':None})
                    items = dbset.select(db[table][field], distinct=distinct, limitby=limits)
                    if items:
                        return Row({'status':200,'response':items,
                                    'pattern':pattern})
                    else:
                        return Row({'status':404,'pattern':pattern,
                                    'error':'no record found','response':None})
                elif tag != args[i]:
                    # literal segment mismatch: try the next pattern
                    break
                otable = table
                i += 1
            if i==len(tags) and table:
                # every segment matched: select and return the records
                ofields = vars.get('order',db[table]._id.name).split('|')
                try:
                    orderby = [db[table][f] if not f.startswith('~') else ~db[table][f[1:]] for f in ofields]
                except (KeyError, AttributeError):
                    return Row({'status':400,'error':'invalid orderby','response':None})
                if exposedfields:
                    fields = [field for field in db[table] if str(field).split('.')[-1] in exposedfields and field.readable]
                else:
                    fields = [field for field in db[table] if field.readable]
                count = dbset.count()
                try:
                    offset = int(vars.get('offset',None) or 0)
                    limits = (offset,int(vars.get('limit',None) or 1000)+offset)
                except ValueError:
                    return Row({'status':400,'error':'invalid limits','response':None})
                if count > limits[1]-limits[0]:
                    return Row({'status':400,'error':'too many records','response':None})
                try:
                    response = dbset.select(limitby=limits,orderby=orderby,*fields)
                except ValueError:
                    return Row({'status':400,'pattern':pattern,
                                'error':'invalid path','response':None})
                return Row({'status':200,'response':response,
                            'pattern':pattern,'count':count})
        return Row({'status':400,'error':'no matching pattern','response':None})
7626
    def define_table(
        self,
        tablename,
        *fields,
        **args
        ):
        """
        Define a new table on this DAL (or register it for lazy definition
        when ``lazy_tables`` is enabled) and return the Table, or None when
        the definition was deferred.

        :raises SyntaxError: on missing/invalid/duplicate table name or
            invalid table attributes.
        """
        if not isinstance(tablename,str):
            raise SyntaxError("missing table name")
        elif hasattr(self,tablename) or tablename in self.tables:
            if not args.get('redefine',False):
                raise SyntaxError('table already defined: %s' % tablename)
        elif tablename.startswith('_') or hasattr(self,tablename) or \
                REGEX_PYTHON_KEYWORDS.match(tablename):
            raise SyntaxError('invalid table name: %s' % tablename)
        elif self.check_reserved:
            self.check_reserved_keyword(tablename)
        else:
            # NOTE(review): because this is an elif-chain, the invalid-args
            # validation below only runs when check_reserved is falsy --
            # confirm this short-circuit is intended
            invalid_args = set(args)-TABLE_ARGS
            if invalid_args:
                raise SyntaxError('invalid table "%s" attributes: %s' \
                    % (tablename,invalid_args))
        if self._lazy_tables and not tablename in self._LAZY_TABLES:
            # defer real definition until first attribute access
            self._LAZY_TABLES[tablename] = (tablename,fields,args)
            table = None
        else:
            table = self.lazy_define_table(tablename,*fields,**args)
        if not tablename in self.tables:
            self.tables.append(tablename)
        return table
7656
    def lazy_define_table(
        self,
        tablename,
        *fields,
        **args
        ):
        """
        Actually construct the Table object (called by define_table, either
        immediately or lazily on first access), create its references and
        default validators, and run the migration machinery when enabled.
        """
        args_get = args.get
        common_fields = self._common_fields
        if common_fields:
            fields = list(fields) + list(common_fields)

        table_class = args_get('table_class',Table)
        table = table_class(self, tablename, *fields, **args)
        table._actual = True
        self[tablename] = table
        # must follow above line to handle self references
        table._create_references()
        for field in table:
            if field.requires == DEFAULT:
                field.requires = sqlhtml_validators(field)

        migrate = self._migrate_enabled and args_get('migrate',self._migrate)
        # NOTE(review): `and` binds tighter than `or`, so the datastore
        # branch is taken regardless of `migrate` -- confirm intended
        if migrate and not self._uri in (None,'None') \
                or self._adapter.dbengine=='google:datastore':
            fake_migrate = self._fake_migrate_all or \
                args_get('fake_migrate',self._fake_migrate)
            polymodel = args_get('polymodel',None)
            try:
                # serialize DDL across threads
                GLOBAL_LOCKER.acquire()
                self._lastsql = self._adapter.create_table(
                    table,migrate=migrate,
                    fake_migrate=fake_migrate,
                    polymodel=polymodel)
            finally:
                GLOBAL_LOCKER.release()
        else:
            table._dbt = None
        on_define = args_get('on_define',None)
        if on_define: on_define(table)
        return table
7697
    def as_dict(self, flat=False, sanitize=True, field_options=True):
        """
        Serialize the whole database definition to a plain dict.

        When ``sanitize`` is True the connection credentials (uri, dbname,
        db_uid) are omitted.  Each table is serialized via Table.as_dict.
        """
        dbname = db_uid = uri = None
        if not sanitize:
            uri, dbname, db_uid = (self._uri, self._dbname, self._db_uid)
        db_as_dict = dict(items={}, tables=[], uri=uri, dbname=dbname,
                          db_uid=db_uid,
                          **dict([(k, getattr(self, "_" + k)) for
                                  k in 'pool_size','folder','db_codec',
                                  'check_reserved','migrate','fake_migrate',
                                  'migrate_enabled','fake_migrate_all',
                                  'decode_credentials','driver_args',
                                  'adapter_args', 'attempts',
                                  'bigint_id','debug','lazy_tables',
                                  'do_connect']))

        for table in self:
            tablename = str(table)
            db_as_dict["tables"].append(tablename)
            db_as_dict["items"][tablename] = table.as_dict(flat=flat,
                                                           sanitize=sanitize,
                                                           field_options=field_options)
        return db_as_dict
7720
7721 - def as_xml(self, sanitize=True, field_options=True):
7722 if not have_serializers: 7723 raise ImportError("No xml serializers available") 7724 d = self.as_dict(flat=True, sanitize=sanitize, 7725 field_options=field_options) 7726 return serializers.xml(d)
7727
7728 - def as_json(self, sanitize=True, field_options=True):
7729 if not have_serializers: 7730 raise ImportError("No json serializers available") 7731 d = self.as_dict(flat=True, sanitize=sanitize, 7732 field_options=field_options) 7733 return serializers.json(d)
7734
7735 - def as_yaml(self, sanitize=True, field_options=True):
7736 if not have_serializers: 7737 raise ImportError("No YAML serializers available") 7738 d = self.as_dict(flat=True, sanitize=sanitize, 7739 field_options=field_options) 7740 return serializers.yaml(d)
7741
7742 - def __contains__(self, tablename):
7743 try: 7744 return tablename in self.tables 7745 except AttributeError: 7746 # The instance has no .tables attribute yet 7747 return False
7748 7749 has_key = __contains__ 7750
7751 - def get(self,key,default=None):
7752 return self.__dict__.get(key,default)
7753
7754 - def __iter__(self):
7755 for tablename in self.tables: 7756 yield self[tablename]
7757
    def __getitem__(self, key):
        """``db[key]`` is equivalent to ``db.<key>`` (key coerced to str)."""
        return self.__getattr__(str(key))
7760
    def __getattr__(self, key):
        """
        Attribute access; materializes lazily-defined tables on first use.

        ogetattr is a module-level alias (presumably object.__getattribute__
        -- confirm) used to bypass this hook and avoid recursion.
        """
        if ogetattr(self,'_lazy_tables') and \
                key in ogetattr(self,'_LAZY_TABLES'):
            # pop so the table is only ever defined once
            tablename, fields, args = self._LAZY_TABLES.pop(key)
            return self.lazy_define_table(tablename,*fields,**args)
        return ogetattr(self, key)
7767
    def __setitem__(self, key, value):
        """``db[key] = value`` with key coerced to str.

        osetattr is a module-level alias (presumably object.__setattr__ --
        confirm), so this bypasses the redefinition guard in __setattr__.
        """
        osetattr(self, str(key), value)
7770
    def __setattr__(self, key, value):
        # guard: public attributes (no leading underscore) may not be
        # rebound once set -- prevents accidental redefinition of tables
        if key[:1]!='_' and key in self:
            raise SyntaxError(
                'Object %s exists and cannot be redefined' % key)
        osetattr(self,key,value)

    # item deletion maps straight onto attribute deletion
    __delitem__ = object.__delattr__
7779 - def __repr__(self):
7780 if hasattr(self,'_uri'): 7781 return '<DAL uri="%s">' % hide_password(str(self._uri)) 7782 else: 7783 return '<DAL db_uid="%s">' % self._db_uid
7784
    def smart_query(self,fields,text):
        """Build a Set from a textual query over *fields* (delegates to the
        module-level smart_query helper of the same name)."""
        return Set(self, smart_query(fields,text))
7787
    def __call__(self, query=None, ignore_common_filters=None):
        """
        Return a Set for *query*.

        Accepts a Query, a Table (expanded to its id query), a Field
        (expanded to ``field != None``) or a dict of options.
        """
        if isinstance(query,Table):
            query = self._adapter.id_query(query)
        elif isinstance(query,Field):
            query = query!=None
        elif isinstance(query, dict):
            icf = query.get("ignore_common_filters")
            if icf: ignore_common_filters = icf
        return Set(self, query, ignore_common_filters=ignore_common_filters)
7797
    def commit(self):
        """Commit the current transaction on the adapter."""
        self._adapter.commit()
7800
    def rollback(self):
        """Roll back the current transaction on the adapter."""
        self._adapter.rollback()
7803
    def close(self):
        """Close the adapter connection and deregister this DAL instance
        from the per-thread instance registry."""
        self._adapter.close()
        if self._db_uid in THREAD_LOCAL.db_instances:
            db_group = THREAD_LOCAL.db_instances[self._db_uid]
            db_group.remove(self)
            if not db_group:
                del THREAD_LOCAL.db_instances[self._db_uid]
7811
7812 - def executesql(self, query, placeholders=None, as_dict=False, 7813 fields=None, colnames=None):
7814 """ 7815 placeholders is optional and will always be None. 7816 If using raw SQL with placeholders, placeholders may be 7817 a sequence of values to be substituted in 7818 or, (if supported by the DB driver), a dictionary with keys 7819 matching named placeholders in your SQL. 7820 7821 Added 2009-12-05 "as_dict" optional argument. Will always be 7822 None when using DAL. If using raw SQL can be set to True 7823 and the results cursor returned by the DB driver will be 7824 converted to a sequence of dictionaries keyed with the db 7825 field names. Tested with SQLite but should work with any database 7826 since the cursor.description used to get field names is part of the 7827 Python dbi 2.0 specs. Results returned with as_dict=True are 7828 the same as those returned when applying .to_list() to a DAL query. 7829 7830 [{field1: value1, field2: value2}, {field1: value1b, field2: value2b}] 7831 7832 Added 2012-08-24 "fields" and "colnames" optional arguments. If either 7833 is provided, the results cursor returned by the DB driver will be 7834 converted to a DAL Rows object using the db._adapter.parse() method. 7835 7836 The "fields" argument is a list of DAL Field objects that match the 7837 fields returned from the DB. The Field objects should be part of one or 7838 more Table objects defined on the DAL object. The "fields" list can 7839 include one or more DAL Table objects in addition to or instead of 7840 including Field objects, or it can be just a single table (not in a 7841 list). In that case, the Field objects will be extracted from the 7842 table(s). 7843 7844 Instead of specifying the "fields" argument, the "colnames" argument 7845 can be specified as a list of field names in tablename.fieldname format. 7846 Again, these should represent tables and fields defined on the DAL 7847 object. 7848 7849 It is also possible to specify both "fields" and the associated 7850 "colnames". 
In that case, "fields" can also include DAL Expression 7851 objects in addition to Field objects. For Field objects in "fields", 7852 the associated "colnames" must still be in tablename.fieldname format. 7853 For Expression objects in "fields", the associated "colnames" can 7854 be any arbitrary labels. 7855 7856 Note, the DAL Table objects referred to by "fields" or "colnames" can 7857 be dummy tables and do not have to represent any real tables in the 7858 database. Also, note that the "fields" and "colnames" must be in the 7859 same order as the fields in the results cursor returned from the DB. 7860 """ 7861 adapter = self._adapter 7862 if placeholders: 7863 adapter.execute(query, placeholders) 7864 else: 7865 adapter.execute(query) 7866 if as_dict: 7867 if not hasattr(adapter.cursor,'description'): 7868 raise RuntimeError("database does not support executesql(...,as_dict=True)") 7869 # Non-DAL legacy db query, converts cursor results to dict. 7870 # sequence of 7-item sequences. each sequence tells about a column. 7871 # first item is always the field name according to Python Database API specs 7872 columns = adapter.cursor.description 7873 # reduce the column info down to just the field names 7874 fields = [f[0] for f in columns] 7875 # will hold our finished resultset in a list 7876 data = adapter._fetchall() 7877 # convert the list for each row into a dictionary so it's 7878 # easier to work with. 
row['field_name'] rather than row[0] 7879 return [dict(zip(fields,row)) for row in data] 7880 try: 7881 data = adapter._fetchall() 7882 except: 7883 return None 7884 if fields or colnames: 7885 fields = [] if fields is None else fields 7886 if not isinstance(fields, list): 7887 fields = [fields] 7888 extracted_fields = [] 7889 for field in fields: 7890 if isinstance(field, Table): 7891 extracted_fields.extend([f for f in field]) 7892 else: 7893 extracted_fields.append(field) 7894 if not colnames: 7895 colnames = ['%s.%s' % (f.tablename, f.name) 7896 for f in extracted_fields] 7897 data = adapter.parse( 7898 data, fields=extracted_fields, colnames=colnames) 7899 return data
7900
7901 - def _remove_references_to(self, thistable):
7902 for table in self: 7903 table._referenced_by = [field for field in table._referenced_by 7904 if not field.table==thistable]
7905
7906 - def export_to_csv_file(self, ofile, *args, **kwargs):
7907 step = int(kwargs.get('max_fetch_rows,',500)) 7908 write_colnames = kwargs['write_colnames'] = \ 7909 kwargs.get("write_colnames", True) 7910 for table in self.tables: 7911 ofile.write('TABLE %s\r\n' % table) 7912 query = self._adapter.id_query(self[table]) 7913 nrows = self(query).count() 7914 kwargs['write_colnames'] = write_colnames 7915 for k in range(0,nrows,step): 7916 self(query).select(limitby=(k,k+step)).export_to_csv_file( 7917 ofile, *args, **kwargs) 7918 kwargs['write_colnames'] = False 7919 ofile.write('\r\n\r\n') 7920 ofile.write('END')
7921
    def import_from_csv_file(self, ifile, id_map=None, null='<NULL>',
                             unique='uuid', *args, **kwargs):
        """
        Restore a database from the multi-table CSV stream produced by
        export_to_csv_file; each 'TABLE <name>' section is delegated to
        Table.import_from_csv_file, 'END' terminates the import.
        """
        #if id_map is None: id_map={}
        id_offset = {} # only used if id_map is None
        for line in ifile:
            line = line.strip()
            if not line:
                continue
            elif line == 'END':
                return
            elif not line.startswith('TABLE ') or not line[6:] in self.tables:
                raise SyntaxError('invalid file format')
            else:
                tablename = line[6:]
                self[tablename].import_from_csv_file(
                    ifile, id_map, null, unique, id_offset, *args, **kwargs)
7938
def DAL_unpickler(db_uid):
    """Unpickle support: rebind to the existing DAL identified by db_uid
    (the '<zombie>' uri creates no new connection)."""
    return DAL('<zombie>',db_uid=db_uid)
7941
def DAL_pickler(db):
    """Pickle support: a DAL reduces to its db_uid only (no connection state)."""
    return DAL_unpickler, (db._db_uid,)
# register pickling support for DAL instances (pickled by db_uid only)
copyreg.pickle(DAL, DAL_pickler, DAL_unpickler)
class SQLALL(object):
    """
    Helper that renders as the comma-separated list of all field names of a
    table (each prefixed by the table name and '.').

    Normally only used internally when building SQL.
    """

    def __init__(self, table):
        self._table = table

    def __str__(self):
        return ', '.join(str(field) for field in self._table)
7960
# class Reference(int):
class Reference(long):
    """
    Integer subclass used for values of 'reference' fields: behaves as the
    referenced record id, but lazily fetches the full record (cached in
    self._record) on attribute or item access.
    """

    def __allocate(self):
        # lazily fetch and cache the referenced record
        if not self._record:
            self._record = self._table[int(self)]
        if not self._record:
            raise RuntimeError(
                "Using a recursive select but encountered a broken reference: %s %d"%(self._table, int(self)))

    def __getattr__(self, key, default=None):
        # BUGFIX: now accepts a default (backward compatible, defaults to
        # None as before) so that get() below works -- previously
        # get(key, default) raised TypeError.
        if key == 'id':
            return int(self)
        self.__allocate()
        return self._record.get(key, default)

    def get(self, key, default=None):
        """dict-style access to the referenced record's values."""
        return self.__getattr__(key, default)

    def __setattr__(self, key, value):
        if key.startswith('_'):
            # private attributes go straight on the instance
            int.__setattr__(self, key, value)
            return
        self.__allocate()
        self._record[key] = value

    def __getitem__(self, key):
        if key == 'id':
            return int(self)
        self.__allocate()
        return self._record.get(key, None)

    def __setitem__(self,key,value):
        self.__allocate()
        self._record[key] = value
7996
def Reference_unpickler(data):
    """Unpickle support: rebuild the plain integer id from marshalled bytes."""
    value = marshal.loads(data)
    return value
8000
def Reference_pickler(data):
    """Pickle support: reduce a Reference to the marshal dump of its int id."""
    try:
        payload = marshal.dumps(int(data))
    except AttributeError:
        # fallback: hand-build a marshal integer record
        payload = 'i%s' % struct.pack('<i', int(data))
    return (Reference_unpickler, (payload,))
# register pickling support for Reference values
copyreg.pickle(Reference, Reference_pickler, Reference_unpickler)
8009 8010 8011 -class Table(object):
8012 8013 """ 8014 an instance of this class represents a database table 8015 8016 Example:: 8017 8018 db = DAL(...) 8019 db.define_table('users', Field('name')) 8020 db.users.insert(name='me') # print db.users._insert(...) to see SQL 8021 db.users.drop() 8022 """ 8023
    def __init__(
        self,
        db,
        tablename,
        *fields,
        **args
        ):
        """
        Initializes the table and performs checking on the provided fields.

        Each table will have automatically an 'id'.

        If a field is of type Table, the fields (excluding 'id') from that table
        will be used instead.

        :raises SyntaxError: when a supplied field is of incorrect type.
        """
        self._actual = False # set to True by define_table()
        self._tablename = tablename
        self._sequence_name = args.get('sequence_name',None) or \
            db and db._adapter.sequence_name(tablename)
        self._trigger_name = args.get('trigger_name',None) or \
            db and db._adapter.trigger_name(tablename)
        self._common_filter = args.get('common_filter', None)
        self._format = args.get('format',None)
        self._singular = args.get(
            'singular',tablename.replace('_',' ').capitalize())
        self._plural = args.get(
            'plural',pluralize(self._singular.lower()).capitalize())
        # horrible but for backward compatibility of appadmin:
        if 'primarykey' in args and args['primarykey']:
            self._primarykey = args.get('primarykey', None)

        # callback hook lists, run around insert/update/delete
        self._before_insert = []
        self._before_update = [Set.delete_uploaded_files]
        self._before_delete = [Set.delete_uploaded_files]
        self._after_insert = []
        self._after_update = []
        self._after_delete = []

        fieldnames,newfields=set(),[]
        if hasattr(self,'_primarykey'):
            if not isinstance(self._primarykey,list):
                raise SyntaxError(
                    "primarykey must be a list of fields from table '%s'" \
                    % tablename)
            if len(self._primarykey)==1:
                self._id = [f for f in fields if isinstance(f,Field) \
                                and f.name==self._primarykey[0]][0]
        elif not [f for f in fields if isinstance(f,Field) and f.type=='id']:
            # no explicit id field supplied: create the implicit one
            field = Field('id', 'id')
            newfields.append(field)
            fieldnames.add('id')
            self._id = field
        virtual_fields = []
        for field in fields:
            if isinstance(field, (FieldMethod, FieldVirtual)):
                virtual_fields.append(field)
            elif isinstance(field, Field) and not field.name in fieldnames:
                if field.db is not None:
                    # field already bound to another table: work on a copy
                    field = copy.copy(field)
                newfields.append(field)
                fieldnames.add(field.name)
                if field.type=='id':
                    self._id = field
            elif isinstance(field, Table):
                # inherit fields (except id) from another table definition
                table = field
                for field in table:
                    if not field.name in fieldnames and not field.type=='id':
                        t2 = not table._actual and self._tablename
                        field = field.clone(point_self_references_to=t2)
                        newfields.append(field)
                        fieldnames.add(field.name)
            elif not isinstance(field, (Field, Table)):
                raise SyntaxError(
                    'define_table argument is not a Field or Table: %s' % field)
        fields = newfields
        self._db = db
        tablename = tablename
        self._fields = SQLCallableList()
        self.virtualfields = []
        fields = list(fields)

        if db and db._adapter.uploads_in_blob==True:
            # adapters that store uploads in blobs get a companion blob
            # field for every 'upload' field with uploadfield=True
            uploadfields = [f.name for f in fields if f.type=='blob']
            for field in fields:
                fn = field.uploadfield
                if isinstance(field, Field) and field.type == 'upload'\
                        and fn is True:
                    fn = field.uploadfield = '%s_blob' % field.name
                if isinstance(fn,str) and not fn in uploadfields:
                    fields.append(Field(fn,'blob',default='',
                                        writable=False,readable=False))

        lower_fieldnames = set()
        reserved = dir(Table) + ['fields']
        for field in fields:
            field_name = field.name
            if db and db.check_reserved:
                db.check_reserved_keyword(field_name)
            elif field_name in reserved:
                raise SyntaxError("field name %s not allowed" % field_name)

            # field names must be unique case-insensitively
            if field_name.lower() in lower_fieldnames:
                raise SyntaxError("duplicate field %s in table %s" \
                    % (field_name, tablename))
            else:
                lower_fieldnames.add(field_name.lower())

            self.fields.append(field_name)
            self[field_name] = field
            if field.type == 'id':
                self['id'] = field
            # bind the field to this table/db
            field.tablename = field._tablename = tablename
            field.table = field._table = self
            field.db = field._db = db
            if db and not field.type in ('text', 'blob', 'json') and \
                    db._adapter.maxcharlength < field.length:
                field.length = db._adapter.maxcharlength
        self.ALL = SQLALL(self)

        if hasattr(self,'_primarykey'):
            for k in self._primarykey:
                if k not in self.fields:
                    # NOTE(review): this message looks malformed (stray quote,
                    # missing closing quote) -- confirm intended text
                    raise SyntaxError(
                        "primarykey must be a list of fields from table '%s " % tablename)
                else:
                    self[k].notnull = True
        for field in virtual_fields:
            self[field.name] = field
    @property
    def fields(self):
        """Read-only SQLCallableList of this table's field names."""
        return self._fields
8158
8159 - def update(self,*args,**kwargs):
8160 raise RuntimeError("Syntax Not Supported")
8161
    def _enable_record_versioning(self,
                                  archive_db=None,
                                  archive_name = '%(tablename)s_archive',
                                  current_record = 'current_record',
                                  is_active = 'is_active'):
        """
        Turn on record versioning for this table: define an archive table,
        hook _before_update to copy old rows there, and (when the table has
        an *is_active* field) soft-delete instead of delete.
        """
        archive_db = archive_db or self._db
        archive_name = archive_name % dict(tablename=self._tablename)
        if archive_name in archive_db.tables():
            return # do not try define the archive if already exists
        fieldnames = self.fields()
        # cross-database archives cannot use a real reference type
        field_type = self if archive_db is self._db else 'bigint'
        archive_db.define_table(
            archive_name,
            Field(current_record,field_type),
            *[field.clone(unique=False) for field in self])
        self._before_update.append(
            lambda qset,fs,db=archive_db,an=archive_name,cn=current_record:
                archive_record(qset,fs,db[an],cn))
        if is_active and is_active in fieldnames:
            self._before_delete.append(
                lambda qset: qset.update(is_active=False))
            newquery = lambda query, t=self: t.is_active == True
            query = self._common_filter
            if query:
                # NOTE(review): this combines a Query with a lambda via `&`
                # -- confirm Query.__and__ supports callables, looks suspicious
                newquery = query & newquery
            self._common_filter = newquery
8188
    def _validate(self,**vars):
        """Run each named field's validators on its value; return a Row
        mapping field name -> error message (empty Row when all valid)."""
        errors = Row()
        for key,value in vars.iteritems():
            value,error = self[key].validate(value)
            if error:
                errors[key] = error
        return errors
8196
    def _create_references(self):
        """
        Resolve this table's 'reference ...' fields: register them on the
        referenced tables' _referenced_by lists, or park them in the DAL's
        _pending_references until the target table is defined.
        """
        db = self._db
        pr = db._pending_references
        self._referenced_by = []
        for field in self:
            fieldname = field.name
            field_type = field.type
            if isinstance(field_type,str) and field_type[:10] == 'reference ':
                ref = field_type[10:].strip()
                if not ref.split():
                    raise SyntaxError('Table: reference to nothing: %s' %ref)
                refs = ref.split('.')
                rtablename = refs[0]
                if not rtablename in db:
                    # target not defined yet: defer until it is
                    pr[rtablename] = pr.get(rtablename,[]) + [field]
                    continue
                rtable = db[rtablename]
                if len(refs)==2:
                    # 'reference table.field' form: only valid on keyed tables
                    rfieldname = refs[1]
                    if not hasattr(rtable,'_primarykey'):
                        raise SyntaxError(
                            'keyed tables can only reference other keyed tables (for now)')
                    if rfieldname not in rtable.fields:
                        raise SyntaxError(
                            "invalid field '%s' for referenced table '%s' in table '%s'" \
                            % (rfieldname, rtablename, self._tablename))
                rtable._referenced_by.append(field)
        # adopt references that were parked while this table was undefined
        for referee in pr.get(self._tablename,[]):
            self._referenced_by.append(referee)
8226
    def _filter_fields(self, record, id=False):
        """Subset *record* to this table's fields; 'id'-type fields are
        dropped unless id=True."""
        return dict([(k, v) for (k, v) in record.iteritems() if k
                     in self.fields and (self[k].type!='id' or id)])
8230
    def _build_query(self,key):
        """ for keyed table only """
        # AND together one equality per primary-key column present in *key*
        query = None
        for k,v in key.iteritems():
            if k in self._primarykey:
                if query:
                    query = query & (self[k] == v)
                else:
                    query = (self[k] == v)
            else:
                raise SyntaxError(
                    'Field %s is not part of the primary key of %s' % \
                    (k,self._tablename))
        return query
8245
    def __getitem__(self, key):
        """
        table[key]: dict key -> keyed-table record lookup; numeric key (or a
        google datastore Key) -> record by id; anything else -> attribute
        (e.g. a Field).
        """
        if not key:
            return None
        elif isinstance(key, dict):
            """ for keyed table """
            query = self._build_query(key)
            rows = self._db(query).select()
            if rows:
                return rows[0]
            return None
        elif str(key).isdigit() or 'google' in DRIVERS and isinstance(key, Key):
            # note precedence: isdigit() OR ('google' in DRIVERS AND isinstance)
            return self._db(self._id == key).select(limitby=(0,1)).first()
        elif key:
            return ogetattr(self, str(key))
8260
    def __call__(self, key=DEFAULT, **kwargs):
        """
        table(id) / table(query) / table(field=value,...): fetch the first
        matching record (or None).  When *key* and kwargs are both given the
        kwargs act as extra equality checks on the fetched record.
        Special kwargs: _for_update, _orderby (forwarded to select).
        """
        for_update = kwargs.get('_for_update',False)
        if '_for_update' in kwargs: del kwargs['_for_update']

        orderby = kwargs.get('_orderby',None)
        if '_orderby' in kwargs: del kwargs['_orderby']

        if not key is DEFAULT:
            if isinstance(key, Query):
                record = self._db(key).select(
                    limitby=(0,1),for_update=for_update, orderby=orderby).first()
            elif not str(key).isdigit():
                record = None
            else:
                record = self._db(self._id == key).select(
                    limitby=(0,1),for_update=for_update, orderby=orderby).first()
            if record:
                # remaining kwargs must all match the fetched record
                for k,v in kwargs.iteritems():
                    if record[k]!=v: return None
            return record
        elif kwargs:
            query = reduce(lambda a,b:a&b,[self[k]==v for k,v in kwargs.iteritems()])
            return self._db(query).select(limitby=(0,1),for_update=for_update, orderby=orderby).first()
        else:
            return None
8286
def __setitem__(self, key, value):
    """table[key] = value: upsert by primary-key dict (keyed tables),
    insert/update by integer id, or plain attribute assignment."""
    if isinstance(key, dict) and isinstance(value, dict):
        """ option for keyed table """
        if set(key.keys()) == set(self._primarykey):
            value = self._filter_fields(value)
            kv = {}
            kv.update(value)
            kv.update(key)
            # try insert first; on failure assume the row exists and update
            if not self.insert(**kv):
                query = self._build_query(key)
                self._db(query).update(**self._filter_fields(value))
        else:
            raise SyntaxError(
                'key must have all fields from primary key: %s'%\
                    (self._primarykey))
    elif str(key).isdigit():
        # table[0] = {...} is the insert idiom; any other id updates
        if key == 0:
            self.insert(**self._filter_fields(value))
        elif self._db(self._id == key)\
                .update(**self._filter_fields(value)) is None:
            raise SyntaxError('No such record: %s' % key)
    else:
        if isinstance(key, dict):
            raise SyntaxError(
                'value must be a dictionary: %s' % value)
        osetattr(self, str(key), value)

# attribute reads are routed through record/field lookup above
__getattr__ = __getitem__
def __setattr__(self, key, value):
    """Forbid rebinding of an already-defined public attribute
    (underscore-prefixed names are always assignable)."""
    is_private = (key[:1] == '_')
    if not is_private and key in self:
        raise SyntaxError('Object exists and cannot be redefined: %s' % key)
    osetattr(self, key, value)
def __delitem__(self, key):
    """del table[key]: delete by primary-key dict (keyed tables) or by
    integer id; raises SyntaxError when nothing was deleted."""
    if isinstance(key, dict):
        query = self._build_query(key)
        if not self._db(query).delete():
            raise SyntaxError('No such record: %s' % key)
    elif not str(key).isdigit() or \
            not self._db(self._id == key).delete():
        raise SyntaxError('No such record: %s' % key)
def __contains__(self,key):
    """'name' in table: true when the attribute (typically a field)
    exists on this table."""
    return hasattr(self,key)

# dict-like alias kept for backward compatibility
has_key = __contains__
def items(self):
    """Return the (attribute, value) pairs of this instance."""
    attributes = self.__dict__
    return attributes.items()
def __iter__(self):
    """Yield this table's Field objects, in declaration order."""
    names = self.fields
    for name in names:
        yield self[name]
def iteritems(self):
    """Iterate over (attribute, value) pairs (Python 2 dict protocol)."""
    return self.__dict__.iteritems()
def __repr__(self):
    # NOTE(review): self.fields is *called* here, so it is presumably a
    # callable list (SQLCallableList) — confirm against the class setup,
    # which is outside this view
    return '<Table %s (%s)>' % (self._tablename,','.join(self.fields()))
def __str__(self):
    """SQL name of the table; aliased tables render as 'orig AS alias'
    (plain 'orig alias' on Oracle, which rejects AS for tables)."""
    if hasattr(self,'_ot') and self._ot is not None:
        if 'Oracle' in str(type(self._db._adapter)): # <<< patch
            return '%s %s' % (self._ot, self._tablename) # <<< patch
        return '%s AS %s' % (self._ot, self._tablename)
    return self._tablename
def _drop(self, mode = ''):
    """Return (without executing) the SQL that would drop this table."""
    adapter = self._db._adapter
    return adapter._drop(self, mode)
def drop(self, mode = ''):
    """Drop this table on the backend, delegating to the adapter."""
    adapter = self._db._adapter
    return adapter.drop(self, mode)
def _listify(self,fields,update=False):
    """Normalize an input dict into a list of (Field, value) pairs,
    filling in defaults/update values and computed fields.

    Raises SyntaxError for unknown field names, RuntimeError for a
    missing required field on insert.
    Bugfix: the 'unable to compute field' error message was misspelled
    ('comput').
    """
    new_fields = {} # format: new_fields[name] = (field,value)

    # store all fields passed as input in new_fields
    for name in fields:
        if not name in self.fields:
            if name != 'id':
                raise SyntaxError(
                    'Field %s does not belong to the table' % name)
        else:
            field = self[name]
            value = fields[name]
            if field.filter_in:
                value = field.filter_in(value)
            new_fields[name] = (field,value)

    # check all fields that should be in the table but are not passed
    to_compute = []
    for ofield in self:
        name = ofield.name
        if not name in new_fields:
            # if field is supposed to be computed, compute it!
            if ofield.compute: # save those to compute for later
                to_compute.append((name,ofield))
            # if field is required, check its default value
            elif not update and not ofield.default is None:
                value = ofield.default
                fields[name] = value
                new_fields[name] = (ofield,value)
            # if this is an update, use the update value instead
            elif update and not ofield.update is None:
                value = ofield.update
                fields[name] = value
                new_fields[name] = (ofield,value)
            # if the field is still not there but it should, error
            elif not update and ofield.required:
                raise RuntimeError(
                    'Table: missing required field: %s' % name)
    # now deal with fields that are supposed to be computed
    if to_compute:
        row = Row(fields)
        for name,ofield in to_compute:
            # try to compute it
            try:
                new_fields[name] = (ofield,ofield.compute(row))
            except (KeyError, AttributeError):
                # fail silently unless the field is required!
                if ofield.required:
                    raise SyntaxError('unable to compute field: %s' % name)
    return new_fields.values()
def _attempt_upload(self, fields):
    """Replace file-like values of 'upload' fields with the stored
    filename produced by Field.store (mutates `fields` in place)."""
    for field in self:
        if field.type=='upload' and field.name in fields:
            value = fields[field.name]
            # strings are assumed to already be stored filenames
            if value and not isinstance(value,str):
                if hasattr(value,'file') and hasattr(value,'filename'):
                    # cgi.FieldStorage-like object
                    new_name = field.store(value.file,filename=value.filename)
                elif hasattr(value,'read') and hasattr(value,'name'):
                    # plain open file object
                    new_name = field.store(value,filename=value.name)
                else:
                    raise RuntimeError("Unable to handle upload")
                fields[field.name] = new_name
def _defaults(self, fields):
    """If no fields/values were specified, fall back to table defaults."""
    if fields:
        return fields
    return dict((field.name, field.default)
                for field in self if field.type != "id")
def _insert(self, **fields):
    """Return the SQL for an insert of `fields`, without executing it."""
    values = self._defaults(fields)
    return self._db._adapter._insert(self, self._listify(values))
def insert(self, **fields):
    """Insert a record, running upload handling and the
    _before_insert/_after_insert callbacks. Returns the adapter's
    insert result (0 when a before-insert callback vetoes)."""
    fields = self._defaults(fields)
    self._attempt_upload(fields)
    # any truthy return from a before-insert callback aborts the insert
    if any(f(fields) for f in self._before_insert): return 0
    ret = self._db._adapter.insert(self, self._listify(fields))
    if ret and self._after_insert:
        fields = Row(fields)
        [f(fields,ret) for f in self._after_insert]
    return ret
def validate_and_insert(self,**fields):
    """Validate each value through its field's validators, inserting
    only if all pass.

    Returns a Row with .id (new id, or None on failure) and .errors
    (a Row mapping field name -> error message).
    """
    response = Row()
    response.errors = Row()
    new_fields = copy.copy(fields)
    for key,value in fields.iteritems():
        value,error = self[key].validate(value)
        if error:
            response.errors[key] = "%s" % error
        else:
            # keep the validator-normalized value
            new_fields[key] = value
    if not response.errors:
        response.id = self.insert(**new_fields)
    else:
        response.id = None
    return response
def update_or_insert(self, _key=DEFAULT, **values):
    """Update the record matching _key (or, absent _key, matching
    `values`) when one exists; otherwise insert `values`.

    Returns the new id on insert, None on update.
    """
    if _key is DEFAULT:
        existing = self(**values)
    elif isinstance(_key, dict):
        existing = self(**_key)
    else:
        existing = self(_key)
    if not existing:
        return self.insert(**values)
    existing.update_record(**values)
    return None
def bulk_insert(self, items):
    """
    here items is a list of dictionaries; all rows are listified first,
    then inserted in one adapter call. Any truthy _before_insert
    callback aborts the whole batch (returns 0); _after_insert
    callbacks receive each item with its corresponding returned id.
    """
    items = [self._listify(item) for item in items]
    if any(f(item) for item in items for f in self._before_insert):return 0
    ret = self._db._adapter.bulk_insert(self,items)
    # fire after-insert hooks only when the adapter reported ids
    ret and [[f(item,ret[k]) for k,item in enumerate(items)] for f in self._after_insert]
    return ret
def _truncate(self, mode = None):
    """Return (without executing) the SQL that would truncate this table."""
    adapter = self._db._adapter
    return adapter._truncate(self, mode)
def truncate(self, mode = None):
    """Remove all records from this table, delegating to the adapter."""
    adapter = self._db._adapter
    return adapter.truncate(self, mode)
def import_from_csv_file(
    self,
    csvfile,
    id_map=None,
    null='<NULL>',
    unique='uuid',
    id_offset=None, # id_offset used only when id_map is None
    *args, **kwargs
    ):
    """
    Import records from csv file.
    Column headers must have same names as table fields.
    Field 'id' is ignored.
    If column names read 'table.file' the 'table.' prefix is ignored.
    'unique' argument is a field which must be unique
    (typically a uuid field)
    'restore' argument is default False;
    if set True will remove old values in table first.
    'id_map' if set to None will not map ids.
    The import will keep the id numbers in the restored table.
    This assumes that there is a field of type id that
    is integer and in incrementing order.
    Will keep the id numbers in restored table.
    """

    delimiter = kwargs.get('delimiter', ',')
    quotechar = kwargs.get('quotechar', '"')
    quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
    restore = kwargs.get('restore', False)
    if restore:
        self._db[self].truncate()

    reader = csv.reader(csvfile, delimiter=delimiter,
                        quotechar=quotechar, quoting=quoting)
    colnames = None
    if isinstance(id_map, dict):
        if not self._tablename in id_map:
            id_map[self._tablename] = {}
        id_map_self = id_map[self._tablename]

    def fix(field, value, id_map, id_offset):
        # convert one raw csv cell into a (fieldname, python value) pair
        list_reference_s='list:reference'
        if value == null:
            value = None
        elif field.type=='blob':
            value = base64.b64decode(value)
        elif field.type=='double' or field.type=='float':
            if not value.strip():
                value = None
            else:
                value = float(value)
        elif field.type in ('integer','bigint'):
            if not value.strip():
                value = None
            else:
                value = int(value)
        elif field.type.startswith('list:string'):
            value = bar_decode_string(value)
        elif field.type.startswith(list_reference_s):
            ref_table = field.type[len(list_reference_s):].strip()
            if id_map is not None:
                # remap each referenced id through the id_map
                value = [id_map[ref_table][int(v)] \
                         for v in bar_decode_string(value)]
            else:
                value = [v for v in bar_decode_string(value)]
        elif field.type.startswith('list:'):
            value = bar_decode_integer(value)
        elif id_map and field.type.startswith('reference'):
            try:
                value = id_map[field.type[9:].strip()][int(value)]
            except KeyError:
                pass
        elif id_offset and field.type.startswith('reference'):
            try:
                value = id_offset[field.type[9:].strip()]+int(value)
            except KeyError:
                pass
        return (field.name, value)

    def is_id(colname):
        # true when the column maps to this table's 'id'-type field
        if colname in self:
            return self[colname].type == 'id'
        else:
            return False

    first = True
    unique_idx = None
    for line in reader:
        if not line:
            break
        if not colnames:
            # first row: parse header, locate id and unique columns
            colnames = [x.split('.',1)[-1] for x in line][:len(line)]
            cols, cid = [], None
            for i,colname in enumerate(colnames):
                if is_id(colname):
                    cid = i
                else:
                    cols.append(i)
                if colname == unique:
                    unique_idx = i
        else:
            items = [fix(self[colnames[i]], line[i], id_map, id_offset) \
                     for i in cols if colnames[i] in self.fields]

            if not id_map and cid is not None and id_offset is not None and not unique_idx:
                csv_id = int(line[cid])
                curr_id = self.insert(**dict(items))
                if first:
                    first = False
                    # First curr_id is bigger than csv_id,
                    # then we are not restoring but
                    # extending db table with csv db table
                    if curr_id>csv_id:
                        id_offset[self._tablename] = curr_id-csv_id
                    else:
                        id_offset[self._tablename] = 0
                # create new id until we get the same as old_id+offset
                while curr_id<csv_id+id_offset[self._tablename]:
                    self._db(self._db[self][colnames[cid]] == curr_id).delete()
                    curr_id = self.insert(**dict(items))
            # Validation. Check for duplicate of 'unique' &,
            # if present, update instead of insert.
            elif not unique_idx:
                new_id = self.insert(**dict(items))
            else:
                unique_value = line[unique_idx]
                query = self._db[self][unique] == unique_value
                record = self._db(query).select().first()
                if record:
                    record.update_record(**dict(items))
                    new_id = record[self._id.name]
                else:
                    new_id = self.insert(**dict(items))
            if id_map and cid is not None:
                id_map_self[int(line[cid])] = new_id
def as_dict(self, flat=False, sanitize=True, field_options=True):
    """Serialize the table *definition* (not its records) to a dict.

    With sanitize=True, fields that are neither readable nor writable
    are omitted.
    """
    tablename = str(self)
    table_as_dict = dict(name=tablename, items={}, fields=[],
                         sequence_name=self._sequence_name,
                         trigger_name=self._trigger_name,
                         common_filter=self._common_filter, format=self._format,
                         singular=self._singular, plural=self._plural)

    for field in self:
        if (field.readable or field.writable) or (not sanitize):
            table_as_dict["fields"].append(field.name)
            table_as_dict["items"][field.name] = \
                field.as_dict(flat=flat, sanitize=sanitize,
                              options=field_options)
    return table_as_dict
def as_xml(self, sanitize=True, field_options=True):
    """Serialize the table definition to XML."""
    if not have_serializers:
        raise ImportError("No xml serializers available")
    return serializers.xml(self.as_dict(flat=True, sanitize=sanitize,
                                        field_options=field_options))
def as_json(self, sanitize=True, field_options=True):
    """Serialize the table definition to JSON."""
    if not have_serializers:
        raise ImportError("No json serializers available")
    return serializers.json(self.as_dict(flat=True, sanitize=sanitize,
                                         field_options=field_options))
def as_yaml(self, sanitize=True, field_options=True):
    """Serialize the table definition to YAML."""
    if not have_serializers:
        raise ImportError("No YAML serializers available")
    return serializers.yaml(self.as_dict(flat=True, sanitize=sanitize,
                                         field_options=field_options))
def with_alias(self, alias):
    """Return an aliased version of this table (for self-joins)."""
    adapter = self._db._adapter
    return adapter.alias(self, alias)
def on(self, query):
    """Build the ON clause used in joins: db.t1.on(db.t1.x == db.t2.y)."""
    db = self._db
    return Expression(db, db._adapter.ON, self, query)
def archive_record(qset,fs,archive_table,current_record):
    """Copy each row in qset into archive_table, recording the source
    row's id in the `current_record` column. Returns False so it can be
    used as a before-update/delete callback without vetoing.

    NOTE(review): the `fs` parameter and the local `table` are unused
    here — confirm whether they are kept for interface compatibility.
    """
    tablenames = qset.db._adapter.tables(qset.query)
    if len(tablenames)!=1: raise RuntimeError("cannot update join")
    table = qset.db[tablenames[0]]
    for row in qset.select():
        fields = archive_table._filter_fields(row)
        fields[current_record] = row.id
        archive_table.insert(**fields)
    return False
class Expression(object):
    """A node in the DAL's SQL expression tree.

    Wraps an adapter operator together with up to two operands; the
    operator overloads below build Expression/Query trees that the
    adapter's expand() later renders into backend-specific SQL.
    """

    def __init__(
        self,
        db,
        op,
        first=None,
        second=None,
        type=None,
        **optional_args
        ):

        self.db = db
        self.op = op          # adapter callable that renders this node
        self.first = first    # left operand
        self.second = second  # right operand or operator argument
        self._table = getattr(first,'_table',None)
        ### self._tablename = first._tablename ## CHECK
        # inherit the SQL type from the first operand unless given
        if not type and first and hasattr(first,'type'):
            self.type = first.type
        else:
            self.type = type
        self.optional_args = optional_args

    # --- aggregates and scalar SQL functions ---------------------------

    def sum(self):
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'SUM', self.type)

    def max(self):
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'MAX', self.type)

    def min(self):
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'MIN', self.type)

    def len(self):
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'LENGTH', 'integer')

    def avg(self):
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'AVG', self.type)

    def abs(self):
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'ABS', self.type)

    def lower(self):
        db = self.db
        return Expression(db, db._adapter.LOWER, self, None, self.type)

    def upper(self):
        db = self.db
        return Expression(db, db._adapter.UPPER, self, None, self.type)

    # --- date/time component extraction --------------------------------

    def year(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'year', 'integer')

    def month(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'month', 'integer')

    def day(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'day', 'integer')

    def hour(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'hour', 'integer')

    def minutes(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'minute', 'integer')

    def coalesce(self,*others):
        db = self.db
        return Expression(db, db._adapter.COALESCE, self, others, self.type)

    def coalesce_zero(self):
        db = self.db
        return Expression(db, db._adapter.COALESCE_ZERO, self, None, self.type)

    def seconds(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'second', 'integer')

    def epoch(self):
        db = self.db
        return Expression(db, db._adapter.EPOCH, self, None, 'integer')

    # --- slicing: maps Python slices onto SQL SUBSTRING ----------------

    def __getslice__(self, start, stop):
        # Python 2 slice protocol; SQL SUBSTRING is 1-based, hence +1
        db = self.db
        if start < 0:
            pos0 = '(%s - %d)' % (self.len(), abs(start) - 1)
        else:
            pos0 = start + 1

        if stop < 0:
            length = '(%s - %d - %s)' % (self.len(), abs(stop) - 1, pos0)
        elif stop == sys.maxint:
            # open-ended slice: take the rest of the string
            length = self.len()
        else:
            length = '(%s - %s)' % (stop + 1, pos0)
        return Expression(db,db._adapter.SUBSTRING,
                          self, (pos0, length), self.type)

    def __getitem__(self, i):
        # NOTE(review): relies on Python 2 routing self[i:i+1] to
        # __getslice__ above; under Python 3 this would recurse.
        return self[i:i + 1]

    def __str__(self):
        return self.db._adapter.expand(self,self.type)

    def __or__(self, other): # for use in sortby
        db = self.db
        return Expression(db,db._adapter.COMMA,self,other,self.type)

    def __invert__(self):
        db = self.db
        # NOTE(review): tests hasattr(self,'_op') but compares self.op;
        # '_op' is never assigned in this class — confirm intent
        if hasattr(self,'_op') and self.op == db._adapter.INVERT:
            return self.first
        return Expression(db,db._adapter.INVERT,self,type=self.type)

    # --- arithmetic -----------------------------------------------------

    def __add__(self, other):
        db = self.db
        return Expression(db,db._adapter.ADD,self,other,self.type)

    def __sub__(self, other):
        db = self.db
        # result type depends on the operand type
        if self.type in ('integer','bigint'):
            result_type = 'integer'
        elif self.type in ['date','time','datetime','double','float']:
            result_type = 'double'
        else:
            raise SyntaxError("subtraction operation not supported for type")
        return Expression(db,db._adapter.SUB,self,other,result_type)

    def __mul__(self, other):
        db = self.db
        return Expression(db,db._adapter.MUL,self,other,self.type)

    def __div__(self, other):
        db = self.db
        return Expression(db,db._adapter.DIV,self,other,self.type)

    def __mod__(self, other):
        db = self.db
        return Expression(db,db._adapter.MOD,self,other,self.type)

    # --- comparisons: these return Query objects, not Expressions -------

    def __eq__(self, value):
        db = self.db
        return Query(db, db._adapter.EQ, self, value)

    def __ne__(self, value):
        db = self.db
        return Query(db, db._adapter.NE, self, value)

    def __lt__(self, value):
        db = self.db
        return Query(db, db._adapter.LT, self, value)

    def __le__(self, value):
        db = self.db
        return Query(db, db._adapter.LE, self, value)

    def __gt__(self, value):
        db = self.db
        return Query(db, db._adapter.GT, self, value)

    def __ge__(self, value):
        db = self.db
        return Query(db, db._adapter.GE, self, value)

    def like(self, value, case_sensitive=False):
        db = self.db
        # LIKE when case-sensitive, ILIKE otherwise
        op = case_sensitive and db._adapter.LIKE or db._adapter.ILIKE
        return Query(db, op, self, value)

    def regexp(self, value):
        db = self.db
        return Query(db, db._adapter.REGEXP, self, value)

    def belongs(self, *value):
        """
        Accepts the following inputs:
           field.belongs(1,2)
           field.belongs((1,2))
           field.belongs(query)

        Does NOT accept:
           field.belongs(1)
        """
        db = self.db
        if len(value) == 1:
            value = value[0]
        if isinstance(value,Query):
            # nested select on the referenced table's id
            value = db(value)._select(value.first._table._id)
        return Query(db, db._adapter.BELONGS, self, value)

    def startswith(self, value):
        db = self.db
        if not self.type in ('string', 'text', 'json'):
            raise SyntaxError("startswith used with incompatible field type")
        return Query(db, db._adapter.STARTSWITH, self, value)

    def endswith(self, value):
        db = self.db
        if not self.type in ('string', 'text', 'json'):
            raise SyntaxError("endswith used with incompatible field type")
        return Query(db, db._adapter.ENDSWITH, self, value)

    def contains(self, value, all=False, case_sensitive=False):
        """
        The case_sensitive parameter is only useful for PostgreSQL.
        For other RDBMSs it is ignored and contains is always case insensitive.
        For MongoDB and GAE contains is always case sensitive.
        """
        db = self.db
        if isinstance(value,(list, tuple)):
            # multiple values: AND them when all=True, else OR
            subqueries = [self.contains(str(v).strip(),case_sensitive=case_sensitive)
                          for v in value if str(v).strip()]
            if not subqueries:
                return self.contains('')
            else:
                return reduce(all and AND or OR,subqueries)
        if not self.type in ('string', 'text', 'json') and not self.type.startswith('list:'):
            raise SyntaxError("contains used with incompatible field type")
        return Query(db, db._adapter.CONTAINS, self, value, case_sensitive=case_sensitive)

    def with_alias(self, alias):
        db = self.db
        return Expression(db, db._adapter.AS, self, alias, self.type)

    # GIS expressions

    def st_asgeojson(self, precision=15, options=0, version=1):
        return Expression(self.db, self.db._adapter.ST_ASGEOJSON, self,
                          dict(precision=precision, options=options,
                               version=version), 'string')

    def st_astext(self):
        db = self.db
        return Expression(db, db._adapter.ST_ASTEXT, self, type='string')

    def st_x(self):
        db = self.db
        return Expression(db, db._adapter.ST_X, self, type='string')

    def st_y(self):
        db = self.db
        return Expression(db, db._adapter.ST_Y, self, type='string')

    def st_distance(self, other):
        db = self.db
        return Expression(db,db._adapter.ST_DISTANCE,self,other, 'double')

    def st_simplify(self, value):
        db = self.db
        return Expression(db, db._adapter.ST_SIMPLIFY, self, value, self.type)

    # GIS queries

    def st_contains(self, value):
        db = self.db
        return Query(db, db._adapter.ST_CONTAINS, self, value)

    def st_equals(self, value):
        db = self.db
        return Query(db, db._adapter.ST_EQUALS, self, value)

    def st_intersects(self, value):
        db = self.db
        return Query(db, db._adapter.ST_INTERSECTS, self, value)

    def st_overlaps(self, value):
        db = self.db
        return Query(db, db._adapter.ST_OVERLAPS, self, value)

    def st_touches(self, value):
        db = self.db
        return Query(db, db._adapter.ST_TOUCHES, self, value)

    def st_within(self, value):
        db = self.db
        return Query(db, db._adapter.ST_WITHIN, self, value)
# for use in both Query and sortby


class SQLCustomType(object):
    """
    allows defining of custom SQL types

    Example::

        decimal = SQLCustomType(
            type ='double',
            native ='integer',
            encoder =(lambda x: int(float(x) * 100)),
            decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) )
            )

        db.define_table(
            'example',
            Field('value', type=decimal)
            )

    :param type: the web2py type (default = 'string')
    :param native: the backend type
    :param encoder: how to encode the value to store it in the backend
    :param decoder: how to decode the value retrieved from the backend
    :param validator: what validators to use ( default = None, will use the
        default validator for type)
    """

    def __init__(
        self,
        type='string',
        native=None,
        encoder=None,
        decoder=None,
        validator=None,
        _class=None,
        ):

        self.type = type
        self.native = native
        # encoder/decoder default to identity
        self.encoder = encoder or (lambda x: x)
        self.decoder = decoder or (lambda x: x)
        self.validator = validator
        self._class = _class or type

    def startswith(self, text=None):
        # NOTE(review): str.startswith is invoked with `self` (this
        # SQLCustomType) as the prefix argument, which raises TypeError
        # and so returns False whenever self.type is a plain string —
        # possibly intentional (non-string types delegate), confirm.
        try:
            return self.type.startswith(self, text)
        except TypeError:
            return False

    def __getslice__(self, a=0, b=100):
        # custom types do not support slicing
        return None

    def __getitem__(self, i):
        # custom types do not support indexing
        return None

    def __str__(self):
        return self._class
class FieldVirtual(object):
    """A virtual (computed-on-read) field: `f` is called with the row
    and its result is exposed under `name`."""
    def __init__(self, name, f=None, ftype='string',label=None,table_name=None):
        # for backward compatibility: FieldVirtual(f) with no name is
        # allowed; the placeholder literal 'unkown' (sic) is kept as-is
        # because it is a runtime value other code may compare against
        (self.name, self.f) = (name, f) if f else ('unkown', name)
        self.type = ftype
        self.label = label or self.name.capitalize().replace('_',' ')
        self.represent = IDENTITY
        self.formatter = IDENTITY
        self.comment = None
        self.readable = True
        self.writable = False          # virtual fields are never writable
        self.requires = None
        self.widget = None
        self.tablename = table_name
        self.filter_out = None
class FieldMethod(object):
    """A lazy field: `f` is exposed as a method of the row, optionally
    wrapped by `handler`."""
    def __init__(self, name, f=None, handler=None):
        # for backward compatibility: FieldMethod(f) with no name
        if f:
            self.name, self.f = name, f
        else:
            self.name, self.f = 'unkown', name
        self.handler = handler
def list_represent(x, r=None):
    """Default representation for list: fields — comma-separated items.

    `r` (the row) is accepted for the represent-callable signature but
    is unused. None is rendered as an empty string.
    """
    values = x or []
    return ', '.join([str(item) for item in values])
class Field(Expression):

    Virtual = FieldVirtual
    Method = FieldMethod
    Lazy = FieldMethod # for backward compatibility

    """
    an instance of this class represents a database field

    example::

        a = Field(name, 'string', length=32, default=None, required=False,
            requires=IS_NOT_EMPTY(), ondelete='CASCADE',
            notnull=False, unique=False,
            widget=None, label=None, comment=None,
            uploadfield=True,     # True means store on disk,
                                  # 'a_field_name' means store in this field in db
                                  # False means file content will be discarded.
            writable=True, readable=True, update=None, authorize=None,
            autodelete=False, represent=None, uploadfolder=None,
            uploadseparate=False, # upload to separate directories by uuid_keys
                                  # first 2 character and tablename.fieldname
                                  # False - old behavior
                                  # True - put uploaded file in
                                  #   <uploaddir>/<tablename>.<fieldname>/uuid_key[:2]
                                  #   directory)
            uploadfs=None)        # a pyfilesystem where to store upload

    to be used as argument of DAL.define_table

    allowed field types:
    string, boolean, integer, double, text, blob,
    date, time, datetime, upload, password

    strings must have a length of Adapter.maxcharlength by default (512 or 255 for mysql)
    fields should have a default or they will be required in SQLFORMs
    the requires argument is used to validate the field input in SQLFORMs

    """
def __init__(
    self,
    fieldname,
    type='string',
    length=None,
    default=DEFAULT,
    required=False,
    requires=DEFAULT,
    ondelete='CASCADE',
    notnull=False,
    unique=False,
    uploadfield=True,
    widget=None,
    label=None,
    comment=None,
    writable=True,
    readable=True,
    update=None,
    authorize=None,
    autodelete=False,
    represent=None,
    uploadfolder=None,
    uploadseparate=False,
    uploadfs=None,
    compute=None,
    custom_store=None,
    custom_retrieve=None,
    custom_retrieve_file_properties=None,
    custom_delete=None,
    filter_in = None,
    filter_out = None,
    custom_qualifier = None,
    map_none = None,
    ):
    """See the class docstring for the meaning of each argument."""
    self._db = self.db = None # both for backward compatibility
    self.op = None
    self.first = None
    self.second = None
    self.name = fieldname = cleanup(fieldname)
    # reject names that would shadow Table attributes or Python keywords
    if not isinstance(fieldname,str) or hasattr(Table,fieldname) or \
            fieldname[0] == '_' or REGEX_PYTHON_KEYWORDS.match(fieldname):
        raise SyntaxError('Field: invalid field name: %s' % fieldname)
    # a Table or Field passed as type becomes a reference to it
    self.type = type if not isinstance(type, (Table,Field)) else 'reference %s' % type
    self.length = length if not length is None else DEFAULTLENGTH.get(self.type,512)
    self.default = default if default!=DEFAULT else (update or None)
    self.required = required # is this field required
    self.ondelete = ondelete.upper() # this is for reference fields only
    self.notnull = notnull
    self.unique = unique
    self.uploadfield = uploadfield
    self.uploadfolder = uploadfolder
    self.uploadseparate = uploadseparate
    self.uploadfs = uploadfs
    self.widget = widget
    self.comment = comment
    self.writable = writable
    self.readable = readable
    self.update = update
    self.authorize = authorize
    self.autodelete = autodelete
    # list: fields default to a comma-separated representation
    self.represent = list_represent if \
        represent==None and type in ('list:integer','list:string') else represent
    self.compute = compute
    self.isattachment = True
    self.custom_store = custom_store
    self.custom_retrieve = custom_retrieve
    self.custom_retrieve_file_properties = custom_retrieve_file_properties
    self.custom_delete = custom_delete
    self.filter_in = filter_in
    self.filter_out = filter_out
    self.custom_qualifier = custom_qualifier
    self.label = label if label!=None else fieldname.replace('_',' ').title()
    self.requires = requires if requires!=None else []
    self.map_none = map_none
def set_attributes(self, *args, **attributes):
    """Bulk-update this field's attributes, dict.update-style."""
    instance_dict = self.__dict__
    instance_dict.update(*args, **attributes)
def clone(self, point_self_references_to=False, **args):
    """Return a shallow copy of this field.

    If point_self_references_to is given and this field references its
    own table, the copy is retargeted to that table. Extra keyword
    arguments override attributes on the copy.

    Bugfix: the original compared field.type against
    'reference %s'+field._tablename, which *concatenates* the literal
    '%s' (yielding e.g. 'reference %sperson') instead of interpolating,
    so self-references were never detected or retargeted.
    """
    field = copy.copy(self)
    if point_self_references_to and \
            field.type == 'reference %s' % field._tablename:
        field.type = 'reference %s' % point_self_references_to
    field.__dict__.update(args)
    return field
def store(self, file, filename=None, path=None):
    """Persist an uploaded file and return the encoded new filename.

    The stored name embeds table, field, a uuid key and the b16-encoded
    original filename. Destination is, in order: custom_store, a blob
    Field (store in db), or the filesystem/pyfilesystem.
    """
    if self.custom_store:
        return self.custom_store(file,filename,path)
    if isinstance(file, cgi.FieldStorage):
        filename = filename or file.filename
        file = file.file
    elif not filename:
        filename = file.name
    # strip any client-supplied directory components
    filename = os.path.basename(filename.replace('/', os.sep)\
                                    .replace('\\', os.sep))
    m = REGEX_STORE_PATTERN.search(filename)
    extension = m and m.group('e') or 'txt'
    uuid_key = web2py_uuid().replace('-', '')[-16:]
    encoded_filename = base64.b16encode(filename).lower()
    newfilename = '%s.%s.%s.%s' % \
        (self._tablename, self.name, uuid_key, encoded_filename)
    # truncate so the name plus extension fits the field length
    newfilename = newfilename[:(self.length - 1 - len(extension))] + '.' + extension
    self_uploadfield = self.uploadfield
    if isinstance(self_uploadfield,Field):
        # uploadfield is a blob Field on another table: store content in db
        blob_uploadfield_name = self_uploadfield.uploadfield
        keys={self_uploadfield.name: newfilename,
              blob_uploadfield_name: file.read()}
        self_uploadfield.table.insert(**keys)
    elif self_uploadfield == True:
        # store on disk (or on the configured pyfilesystem)
        if path:
            pass
        elif self.uploadfolder:
            path = self.uploadfolder
        elif self.db._adapter.folder:
            path = pjoin(self.db._adapter.folder, '..', 'uploads')
        else:
            raise RuntimeError(
                "you must specify a Field(...,uploadfolder=...)")
        if self.uploadseparate:
            if self.uploadfs:
                raise RuntimeError("not supported")
            # shard uploads by table.field and first 2 uuid characters
            path = pjoin(path,"%s.%s" %(self._tablename, self.name),
                         uuid_key[:2])
        if not exists(path):
            os.makedirs(path)
        pathfilename = pjoin(path, newfilename)
        if self.uploadfs:
            dest_file = self.uploadfs.open(newfilename, 'wb')
        else:
            dest_file = open(pathfilename, 'wb')
        try:
            shutil.copyfileobj(file, dest_file)
        except IOError:
            raise IOError(
                'Unable to store file "%s" because invalid permissions, readonly file system, or filename too long' % pathfilename)
        dest_file.close()
    return newfilename
def retrieve(self, name, path=None, nameonly=False):
    """
    Return (filename, stream) for a stored upload `name`;
    if nameonly==True return (filename, fullfilename) instead of
    (filename, stream)
    """
    self_uploadfield = self.uploadfield
    if self.custom_retrieve:
        return self.custom_retrieve(name, path)
    import http
    # db-stored or authorized uploads require fetching the owning row
    if self.authorize or isinstance(self_uploadfield, str):
        row = self.db(self == name).select().first()
        if not row:
            raise http.HTTP(404)
        if self.authorize and not self.authorize(row):
            raise http.HTTP(403)
    m = REGEX_UPLOAD_PATTERN.match(name)
    if not m or not self.isattachment:
        raise TypeError('Can\'t retrieve %s' % name)
    file_properties = self.retrieve_file_properties(name,path)
    filename = file_properties['filename']
    if isinstance(self_uploadfield, str): # ## if file is in DB
        stream = StringIO.StringIO(row[self_uploadfield] or '')
    elif isinstance(self_uploadfield,Field):
        # file content lives in another table's blob field
        blob_uploadfield_name = self_uploadfield.uploadfield
        query = self_uploadfield == name
        data = self_uploadfield.table(query)[blob_uploadfield_name]
        stream = StringIO.StringIO(data)
    elif self.uploadfs:
        # ## if file is on pyfilesystem
        stream = self.uploadfs.open(name, 'rb')
    else:
        # ## if file is on regular filesystem
        # this is intentionally a string with filename and not a stream
        # this propagates and allows stream_file_or_304_or_206 to be called
        fullname = pjoin(file_properties['path'],name)
        if nameonly:
            return (filename, fullname)
        stream = open(fullname,'rb')
    return (filename, stream)
def retrieve_file_properties(self, name, path=None):
    """
    Return ``dict(path=..., filename=...)`` for stored upload ``name``.

    The original client filename is decoded from the base16-encoded
    tail of the generated name; on any decode/match failure ``name``
    itself is used. ``path`` is None when the payload lives in the
    database (uploadfield is a str or a Field).
    """
    self_uploadfield = self.uploadfield
    if self.custom_retrieve_file_properties:
        return self.custom_retrieve_file_properties(name, path)
    try:
        m = REGEX_UPLOAD_PATTERN.match(name)
        if not m or not self.isattachment:
            raise TypeError('Can\'t retrieve %s file properties' % name)
        # the original filename was b16-encoded into the stored name
        filename = base64.b16decode(m.group('name'), True)
        filename = REGEX_CLEANUP_FN.sub('_', filename)
    except (TypeError, AttributeError):
        # malformed name: fall back to the raw stored name
        filename = name
    if isinstance(self_uploadfield, str): # ## if file is in DB
        return dict(path=None,filename=filename)
    elif isinstance(self_uploadfield,Field):
        return dict(path=None,filename=filename)
    else:
        # ## if file is on filesystem
        if path:
            pass
        elif self.uploadfolder:
            path = self.uploadfolder
        else:
            path = pjoin(self.db._adapter.folder, '..', 'uploads')
        if self.uploadseparate:
            # NOTE(review): if the regex above failed to match, `m` is
            # None here and these .group() calls raise -- presumably
            # callers always pass well-formed names when uploadseparate
            # is enabled; confirm.
            t = m.group('table')
            f = m.group('field')
            u = m.group('uuidkey')
            # files are sharded into per-table.field/uuid-prefix dirs
            path = pjoin(path,"%s.%s" % (t,f),u[:2])
        return dict(path=path,filename=filename)
def formatter(self, value):
    """
    Run ``value`` through the formatter of each validator in
    self.requires, last validator first, and return the result.
    None (or an empty requires) maps onto self.map_none.
    """
    validators = self.requires
    if value is None or not validators:
        # nothing to format; substitute the configured placeholder
        return value or self.map_none
    # work on a fresh list so reversing never mutates self.requires
    if isinstance(validators, tuple):
        validators = list(validators)
    elif isinstance(validators, list):
        validators = copy.copy(validators)
    else:
        validators = [validators]
    for validator in reversed(validators):
        if hasattr(validator, 'formatter'):
            value = validator.formatter(value)
    return value
def validate(self, value):
    """
    Pipe ``value`` through this field's validators and return the pair
    ``(value, error)``. Validation stops at the first error; on success
    a value equal to self.map_none is normalized to None.
    """
    requires = self.requires
    if not requires or requires == DEFAULT:
        # no validators configured: only apply the map_none rule
        return (None if value == self.map_none else value, None)
    if not isinstance(requires, (list, tuple)):
        requires = [requires]
    for validator in requires:
        value, error = validator(value)
        if error:
            # first failure wins; return the partially-converted value
            return (value, error)
    return (None if value == self.map_none else value, None)
def count(self, distinct=None):
    """Return a COUNT expression over this field (typed 'integer')."""
    db = self.db
    return Expression(db, db._adapter.COUNT, self, distinct, 'integer')
def as_dict(self, flat=False, sanitize=True, options=True):
    """
    Return this Field's definition as a plain dictionary.

    :param flat: stringify values that are not json-serializable
        (classes, callables, custom objects)
    :param sanitize: omit the internals of CRYPT/IS_STRONG validators
    :param options: when False, blank out validator option data
        ("labels"/"theset") in the serialized requires
    """

    # attributes copied verbatim (after flatten) into the result
    attrs = ('type', 'length', 'default', 'required',
             'ondelete', 'notnull', 'unique', 'uploadfield',
             'widget', 'label', 'comment', 'writable', 'readable',
             'update', 'authorize', 'autodelete', 'represent',
             'uploadfolder', 'uploadseparate', 'uploadfs',
             'compute', 'custom_store', 'custom_retrieve',
             'custom_retrieve_file_properties', 'custom_delete',
             'filter_in', 'filter_out', 'custom_qualifier',
             'map_none', 'name')

    # Python 2 builtins: long/basestring
    SERIALIZABLE_TYPES = (int, long, basestring, dict, list,
                          float, tuple, bool, type(None))

    def flatten(obj):
        # best-effort conversion of obj into json-friendly values;
        # only active when flat=True, otherwise a shallow copy/pass-through
        if flat:
            if isinstance(obj, flatten.__class__):
                # obj is a plain function: report its type
                return str(type(obj))
            elif isinstance(obj, type):
                # a class: extract the dotted name from its repr
                try:
                    return str(obj).split("'")[1]
                except IndexError:
                    return str(obj)
            elif not isinstance(obj, SERIALIZABLE_TYPES):
                return str(obj)
            elif isinstance(obj, dict):
                newobj = dict()
                for k, v in obj.items():
                    newobj[k] = flatten(v)
                return newobj
            elif isinstance(obj, (list, tuple, set)):
                return [flatten(v) for v in obj]
            else:
                return obj
        elif isinstance(obj, (dict, set)):
            return obj.copy()
        else: return obj

    def filter_requires(t, r, options=True):
        # serialize validator r (of type t); secret-bearing validators
        # are dropped entirely when sanitize is on
        if sanitize and any([keyword in str(t).upper() for
                             keyword in ("CRYPT", "IS_STRONG")]):
            return None

        if not isinstance(r, dict):
            if options and hasattr(r, "options"):
                if callable(r.options):
                    # called for its side effect only: presumably
                    # populates labels/theset before __dict__ is copied
                    r.options()
            newr = r.__dict__.copy()
        else:
            newr = r.copy()

        # remove options if not required
        if not options and newr.has_key("labels"):
            [newr.update({key:None}) for key in
             ("labels", "theset") if (key in newr)]

        for k, v in newr.items():
            if k == "other":
                # nested validator (e.g. IS_EMPTY_OR): recurse
                if isinstance(v, dict):
                    otype, other = v.popitem()
                else:
                    otype = flatten(type(v))
                    other = v
                newr[k] = {otype: filter_requires(otype, other,
                                                  options=options)}
            else:
                newr[k] = flatten(v)
        return newr

    # requires is keyed by the (flattened) validator class
    if isinstance(self.requires, (tuple, list, set)):
        requires = dict([(flatten(type(r)),
                          filter_requires(type(r), r,
                                          options=options)) for
                         r in self.requires])
    else:
        requires = {flatten(type(self.requires)):
                    filter_requires(type(self.requires),
                                    self.requires, options=options)}

    d = dict(colname="%s.%s" % (self.tablename, self.name),
             requires=requires)
    d.update([(attr, flatten(getattr(self, attr))) for attr in attrs])
    return d
def as_xml(self, sanitize=True, options=True):
    """Serialize this field's flat dict form to XML via gluon serializers."""
    if not have_serializers:
        raise ImportError("No xml serializers available")
    d = self.as_dict(flat=True, sanitize=sanitize,
                     options=options)
    return serializers.xml(d)
def as_json(self, sanitize=True, options=True):
    """Serialize this field's flat dict form to JSON via gluon serializers."""
    if not have_serializers:
        raise ImportError("No json serializers available")
    d = self.as_dict(flat=True, sanitize=sanitize,
                     options=options)
    return serializers.json(d)
def as_yaml(self, sanitize=True, options=True):
    """Serialize this field's flat dict form to YAML via gluon serializers."""
    if not have_serializers:
        raise ImportError("No YAML serializers available")
    d = self.as_dict(flat=True, sanitize=sanitize,
                     options=options)
    return serializers.yaml(d)
def __nonzero__(self):
    # Python 2 truth protocol: a Field object is always truthy, so
    # `if field:` never accidentally evaluates it as a query.
    return True
def __str__(self):
    """Return 'tablename.fieldname', or '<no table>.fieldname' when unbound."""
    # bare except kept deliberately: any failure reading tablename
    # (field not yet attached to a table) falls back to the placeholder
    try:
        tablename = self.tablename
    except:
        return '<no table>.%s' % self.name
    return '%s.%s' % (tablename, self.name)
class Query(object):

    """
    a query object necessary to define a set.
    it can be stored or can be passed to DAL.__call__() to obtain a Set

    Example::

        query = db.users.name=='Max'
        set = db(query)
        records = set.select()

    """

    def __init__(
        self,
        db,
        op,
        first=None,
        second=None,
        ignore_common_filters = False,
        **optional_args
        ):
        # op: an adapter operator method (e.g. db._adapter.EQ);
        # first/second: its operands (Field, Expression, constant or
        # nested Query)
        self.db = self._db = db
        self.op = op
        self.first = first
        self.second = second
        self.ignore_common_filters = ignore_common_filters
        self.optional_args = optional_args

    def __repr__(self):
        # expands to the adapter's SQL fragment, for debugging
        return '<Query %s>' % BaseAdapter.expand(self.db._adapter,self)

    def __str__(self):
        # the raw SQL fragment this query expands to
        return self.db._adapter.expand(self)

    def __and__(self, other):
        # query & query -> SQL AND
        return Query(self.db,self.db._adapter.AND,self,other)

    def __or__(self, other):
        # query | query -> SQL OR
        return Query(self.db,self.db._adapter.OR,self,other)

    def __invert__(self):
        # double negation collapses back to the original query
        if self.op==self.db._adapter.NOT:
            return self.first
        return Query(self.db,self.db._adapter.NOT,self)

    def __eq__(self, other):
        # equality is by generated SQL text, not by structure
        return repr(self) == repr(other)

    def __ne__(self, other):
        return not (self == other)

    def case(self,t=1,f=0):
        # SQL: CASE WHEN <self> THEN t ELSE f END
        return self.db._adapter.CASE(self,t,f)

    def as_dict(self, flat=False, sanitize=True):
        """Experimental stuff

        This allows to return a plain dictionary with the basic
        query representation. Can be used with json/xml services
        for client-side db I/O

        Example:
        >>> q = db.auth_user.id != 0
        >>> q.as_dict(flat=True)
        {"op": "NE", "first":{"tablename": "auth_user",
                              "fieldname": "id"},
                     "second":0}
        """

        # values that can pass through untouched (Python 2:
        # long/basestring are py2 builtins)
        SERIALIZABLE_TYPES = (tuple, dict, list, int, long, float,
                              basestring, type(None), bool)
        def loop(d):
            # recursively turn a Query/Expression __dict__ into plain
            # serializable values; anything unrecognized is dropped
            newd = dict()
            for k, v in d.items():
                if k in ("first", "second"):
                    if isinstance(v, self.__class__):
                        newd[k] = loop(v.__dict__)
                    elif isinstance(v, Field):
                        # a Field is referenced by table/field name
                        newd[k] = {"tablename": v._tablename,
                                   "fieldname": v.name}
                    elif isinstance(v, Expression):
                        newd[k] = loop(v.__dict__)
                    elif isinstance(v, SERIALIZABLE_TYPES):
                        newd[k] = v
                    elif isinstance(v, (datetime.date,
                                        datetime.time,
                                        datetime.datetime)):
                        newd[k] = unicode(v)
                elif k == "op":
                    if callable(v):
                        # adapter method -> its name ("EQ", "NE", ...)
                        newd[k] = v.__name__
                    elif isinstance(v, basestring):
                        newd[k] = v
                    else: pass # not callable or string
                elif isinstance(v, SERIALIZABLE_TYPES):
                    if isinstance(v, dict):
                        newd[k] = loop(v)
                    else: newd[k] = v
            return newd

        if flat:
            return loop(self.__dict__)
        else: return self.__dict__

    def as_xml(self, sanitize=True):
        # serialize the flat dict form through gluon.serializers
        if have_serializers:
            xml = serializers.xml
        else:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return xml(d)

    def as_json(self, sanitize=True):
        # serialize the flat dict form through gluon.serializers
        if have_serializers:
            json = serializers.json
        else:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return json(d)
def xorify(orderby):
    """
    Fold a sequence of orderby expressions into one by combining them
    with the ``|`` operator; return None for an empty/missing sequence.
    """
    if not orderby:
        return None
    combined = orderby[0]
    for expression in orderby[1:]:
        combined = combined | expression
    return combined
def use_common_filters(query):
    """
    True if ``query`` exists and has not opted out of common filters.
    A falsy query is returned as-is (preserving None/False).
    """
    if not query:
        return query
    if not hasattr(query, 'ignore_common_filters'):
        return False
    return not query.ignore_common_filters
class Set(object):

    """
    a Set represents a set of records in the database,
    the records are identified by the query=Query(...) object.
    normally the Set is generated by DAL.__call__(Query(...))

    given a set, for example
    set = db(db.users.name=='Max')
    you can:
    set.update(db.users.name='Massimo')
    set.delete() # all elements in the set
    set.select(orderby=db.users.id, groupby=db.users.name, limitby=(0,10))
    and take subsets:
    subset = set(db.users.id<5)
    """

    def __init__(self, db, query, ignore_common_filters = None):
        self.db = db
        self._db = db # for backward compatibility
        self.dquery = None

        # if query is a dict, parse it
        if isinstance(query, dict):
            query = self.parse(query)

        # flip the query's common-filter flag only when the caller asks
        # for the opposite of what the query currently does; copy first
        # so the caller's query object is not mutated
        if not ignore_common_filters is None and \
                use_common_filters(query) == ignore_common_filters:
            query = copy.copy(query)
            query.ignore_common_filters = ignore_common_filters
        self.query = query

    def __repr__(self):
        # expands to the adapter's SQL, for debugging
        return '<Set %s>' % BaseAdapter.expand(self.db._adapter,self.query)

    def __call__(self, query, ignore_common_filters=False):
        # refine this set: coerce shortcuts first (Table -> id query,
        # str -> raw SQL expression, Field -> "is not null"), then AND
        # the new condition with the current query
        if isinstance(query,Table):
            query = self.db._adapter.id_query(query)
        elif isinstance(query,str):
            query = Expression(self.db,query)
        elif isinstance(query,Field):
            query = query!=None
        if self.query:
            return Set(self.db, self.query & query,
                       ignore_common_filters=ignore_common_filters)
        else:
            return Set(self.db, query,
                       ignore_common_filters=ignore_common_filters)

    def _count(self,distinct=None):
        # SQL text of the COUNT, without executing it
        return self.db._adapter._count(self.query,distinct)

    def _select(self, *fields, **attributes):
        # SQL text of the SELECT, without executing it
        adapter = self.db._adapter
        tablenames = adapter.tables(self.query,
                                    attributes.get('join',None),
                                    attributes.get('left',None),
                                    attributes.get('orderby',None),
                                    attributes.get('groupby',None))
        fields = adapter.expand_all(fields, tablenames)
        return adapter._select(self.query,fields,attributes)

    def _delete(self):
        # SQL text of the DELETE, without executing it
        db = self.db
        tablename = db._adapter.get_table(self.query)
        return db._adapter._delete(tablename,self.query)

    def _update(self, **update_fields):
        # SQL text of the UPDATE, without executing it
        db = self.db
        tablename = db._adapter.get_table(self.query)
        fields = db[tablename]._listify(update_fields,update=True)
        return db._adapter._update(tablename,self.query,fields)

    def as_dict(self, flat=False, sanitize=True):
        if flat:
            uid = dbname = uri = None
            codec = self.db._db_codec
            if not sanitize:
                # NOTE(review): `uri` receives db._dbname and `name`
                # receives str(db) -- these look swapped; confirm
                # before relying on the "db" entry's contents
                uri, dbname, uid = (self.db._dbname, str(self.db),
                                    self.db._db_uid)
            d = {"query": self.query.as_dict(flat=flat)}
            d["db"] = {"uid": uid, "codec": codec,
                       "name": dbname, "uri": uri}
            return d
        else: return self.__dict__

    def as_xml(self, sanitize=True):
        # serialize the flat dict form through gluon.serializers
        if have_serializers:
            xml = serializers.xml
        else:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return xml(d)

    def as_json(self, sanitize=True):
        # serialize the flat dict form through gluon.serializers
        if have_serializers:
            json = serializers.json
        else:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return json(d)

    def parse(self, dquery):
        "Experimental: Turn a dictionary into a Query object"
        self.dquery = dquery
        return self.build(self.dquery)

    def build(self, d):
        "Experimental: see .parse()"
        op, first, second = (d["op"], d["first"],
                             d.get("second", None))
        left = right = built = None

        if op in ("AND", "OR"):
            # both operands must themselves be query dicts
            if not (type(first), type(second)) == (dict, dict):
                raise SyntaxError("Invalid AND/OR query")
            if op == "AND":
                built = self.build(first) & self.build(second)
            else: built = self.build(first) | self.build(second)

        elif op == "NOT":
            if first is None:
                raise SyntaxError("Invalid NOT query")
            built = ~self.build(first)
        else:
            # normal operation (GT, EQ, LT, ...)
            for k, v in {"left": first, "right": second}.items():
                # operands may be nested query dicts or field references
                if isinstance(v, dict) and v.get("op"):
                    v = self.build(v)
                if isinstance(v, dict) and ("tablename" in v):
                    v = self.db[v["tablename"]][v["fieldname"]]
                if k == "left": left = v
                else: right = v

            # resolve the adapter method for this operator name, if any
            if hasattr(self.db._adapter, op):
                opm = getattr(self.db._adapter, op)

            if op == "EQ": built = left == right
            elif op == "NE": built = left != right
            elif op == "GT": built = left > right
            elif op == "GE": built = left >= right
            elif op == "LT": built = left < right
            elif op == "LE": built = left <= right
            elif op in ("JOIN", "LEFT_JOIN", "RANDOM", "ALLOW_NULL"):
                # nullary operators
                built = Expression(self.db, opm)
            elif op in ("LOWER", "UPPER", "EPOCH", "PRIMARY_KEY",
                        "COALESCE_ZERO", "RAW", "INVERT"):
                # unary operators
                built = Expression(self.db, opm, left)
            elif op in ("COUNT", "EXTRACT", "AGGREGATE", "SUBSTRING",
                        "REGEXP", "LIKE", "ILIKE", "STARTSWITH",
                        "ENDSWITH", "ADD", "SUB", "MUL", "DIV",
                        "MOD", "AS", "ON", "COMMA", "NOT_NULL",
                        "COALESCE", "CONTAINS", "BELONGS"):
                # binary operators
                built = Expression(self.db, opm, left, right)
            # expression as string
            elif not (left or right): built = Expression(self.db, op)
            else:
                raise SyntaxError("Operator not supported: %s" % op)

        return built

    def isempty(self):
        # cheap existence check: fetch at most one row
        return not self.select(limitby=(0,1))

    def count(self,distinct=None, cache=None):
        db = self.db
        if cache:
            # cache is (cache_model, time_expire); the key is derived
            # from the COUNT SQL, hashed if too long for the cache key
            cache_model, time_expire = cache
            sql = self._count(distinct=distinct)
            key = db._uri + '/' + sql
            if len(key)>200: key = hashlib_md5(key).hexdigest()
            return cache_model(
                key,
                (lambda self=self,distinct=distinct: \
                  db._adapter.count(self.query,distinct)),
                time_expire)
        return db._adapter.count(self.query,distinct)

    def select(self, *fields, **attributes):
        # execute the SELECT; tables are inferred from the query and
        # the join/left/orderby/groupby attributes
        adapter = self.db._adapter
        tablenames = adapter.tables(self.query,
                                    attributes.get('join',None),
                                    attributes.get('left',None),
                                    attributes.get('orderby',None),
                                    attributes.get('groupby',None))
        fields = adapter.expand_all(fields, tablenames)
        return adapter.select(self.query,fields,attributes)

    def nested_select(self,*fields,**attributes):
        # wrap the SELECT SQL so it can be used as a subquery
        return Expression(self.db,self._select(*fields,**attributes))

    def delete(self):
        db = self.db
        tablename = db._adapter.get_table(self.query)
        table = db[tablename]
        # any truthy _before_delete callback vetoes the delete
        if any(f(self) for f in table._before_delete): return 0
        ret = db._adapter.delete(tablename,self.query)
        # _after_delete callbacks fire only when rows were deleted
        ret and [f(self) for f in table._after_delete]
        return ret

    def update(self, **update_fields):
        db = self.db
        tablename = db._adapter.get_table(self.query)
        table = db[tablename]
        table._attempt_upload(update_fields)
        # any truthy _before_update callback vetoes the update
        if any(f(self,update_fields) for f in table._before_update):
            return 0
        fields = table._listify(update_fields,update=True)
        if not fields:
            raise SyntaxError("No fields to update")
        ret = db._adapter.update(tablename,self.query,fields)
        # _after_update callbacks fire only when rows were updated
        ret and [f(self,update_fields) for f in table._after_update]
        return ret

    def update_naive(self, **update_fields):
        """
        same as update but does not call table._before_update and _after_update
        """
        tablename = self.db._adapter.get_table(self.query)
        table = self.db[tablename]
        fields = table._listify(update_fields,update=True)
        if not fields: raise SyntaxError("No fields to update")
        ret = self.db._adapter.update(tablename,self.query,fields)
        return ret

    def validate_and_update(self, **update_fields):
        # like update(), but runs each value through its field's
        # validators first; returns a Row with .errors and .updated
        tablename = self.db._adapter.get_table(self.query)
        response = Row()
        response.errors = Row()
        new_fields = copy.copy(update_fields)
        for key,value in update_fields.iteritems():
            value,error = self.db[tablename][key].validate(value)
            if error:
                response.errors[key] = error
            else:
                new_fields[key] = value
        table = self.db[tablename]
        if response.errors:
            # validation failed: nothing written
            response.updated = None
        else:
            if not any(f(self,new_fields) for f in table._before_update):
                fields = table._listify(new_fields,update=True)
                if not fields: raise SyntaxError("No fields to update")
                ret = self.db._adapter.update(tablename,self.query,fields)
                ret and [f(self,new_fields) for f in table._after_update]
            else:
                # vetoed by a _before_update callback
                ret = 0
            response.updated = ret
        return response

    def delete_uploaded_files(self, upload_fields=None):
        # remove files referenced by upload fields of the records in
        # this set; NOTE(review): returns False unconditionally, even
        # after deleting files -- callers should not test the result
        table = self.db[self.db._adapter.tables(self.query)[0]]
        # ## mind uploadfield==True means file is not in DB
        if upload_fields:
            fields = upload_fields.keys()
        else:
            fields = table.fields
        # only filesystem-stored, autodelete upload fields qualify
        fields = [f for f in fields if table[f].type == 'upload'
                   and table[f].uploadfield == True
                   and table[f].autodelete]
        if not fields:
            return False
        for record in self.select(*[table[f] for f in fields]):
            for fieldname in fields:
                field = table[fieldname]
                oldname = record.get(fieldname, None)
                if not oldname:
                    continue
                if upload_fields and oldname == upload_fields[fieldname]:
                    # the file is being kept (same name re-assigned)
                    continue
                if field.custom_delete:
                    field.custom_delete(oldname)
                else:
                    uploadfolder = field.uploadfolder
                    if not uploadfolder:
                        uploadfolder = pjoin(
                            self.db._adapter.folder, '..', 'uploads')
                    if field.uploadseparate:
                        # sharded layout: table.field/uuid-prefix dirs
                        items = oldname.split('.')
                        uploadfolder = pjoin(
                            uploadfolder,
                            "%s.%s" % (items[0], items[1]),
                            items[2][:2])
                    oldpath = pjoin(uploadfolder, oldname)
                    if exists(oldpath):
                        os.unlink(oldpath)
        return False
class RecordUpdater(object):
    """
    Callable bound to one record of one table: calling it writes the
    given fields (or the record's current column set when none are
    given) back to the database and returns the refreshed column set.
    """

    def __init__(self, colset, table, id):
        self.colset = colset
        self.db = table._db
        self.tablename = table._tablename
        self.id = id

    def __call__(self, **fields):
        table = self.db[self.tablename]
        # no explicit fields: re-save the record's current columns
        newfields = fields or dict(self.colset)
        # drop columns the table doesn't know and the 'id' column
        for fieldname in list(newfields.keys()):
            if fieldname not in table.fields or table[fieldname].type == 'id':
                del newfields[fieldname]
        # bypass common filters so the record is reachable regardless
        table._db(table._id == self.id,
                  ignore_common_filters=True).update(**newfields)
        self.colset.update(newfields)
        return self.colset
class RecordDeleter(object):
    """Callable bound to one record: calling it deletes that record."""

    def __init__(self, table, id):
        self.db = table._db
        self.tablename = table._tablename
        self.id = id

    def __call__(self):
        db = self.db
        return db(db[self.tablename]._id == self.id).delete()
class LazySet(object):
    """
    A Set built on demand from the query ``field == id``.
    Every operation delegates to a freshly constructed Set, so the
    query is evaluated only when actually used.
    """

    def __init__(self, field, id):
        self.db = field.db
        self.tablename = field._tablename
        self.fieldname = field.name
        self.id = id

    def _getset(self):
        # materialize the underlying Set from the stored coordinates
        field = self.db[self.tablename][self.fieldname]
        return Set(self.db, field == self.id)

    def __repr__(self):
        return repr(self._getset())

    def __call__(self, query, ignore_common_filters=False):
        return self._getset()(query, ignore_common_filters)

    def _count(self, distinct=None):
        return self._getset()._count(distinct)

    def _select(self, *fields, **attributes):
        return self._getset()._select(*fields, **attributes)

    def _delete(self):
        return self._getset()._delete()

    def _update(self, **update_fields):
        return self._getset()._update(**update_fields)

    def isempty(self):
        return self._getset().isempty()

    def count(self, distinct=None, cache=None):
        return self._getset().count(distinct, cache)

    def select(self, *fields, **attributes):
        return self._getset().select(*fields, **attributes)

    def nested_select(self, *fields, **attributes):
        return self._getset().nested_select(*fields, **attributes)

    def delete(self):
        return self._getset().delete()

    def update(self, **update_fields):
        return self._getset().update(**update_fields)

    def update_naive(self, **update_fields):
        return self._getset().update_naive(**update_fields)

    def validate_and_update(self, **update_fields):
        return self._getset().validate_and_update(**update_fields)

    def delete_uploaded_files(self, upload_fields=None):
        return self._getset().delete_uploaded_files(upload_fields)
class VirtualCommand(object):
    """
    Bind a lazy virtual-field method to a specific row; calling the
    instance invokes the method with that row as its first argument.
    """

    def __init__(self, method, row):
        self.method = method
        self.row = row

    def __call__(self, *args, **kwargs):
        return self.method(self.row, *args, **kwargs)
def lazy_virtualfield(f):
    """
    Decorator marking ``f`` as a lazy virtual field: Rows.setvirtualfields
    checks the ``__lazy__`` flag and wraps such methods in VirtualCommand
    instead of evaluating them eagerly.
    """
    setattr(f, '__lazy__', True)
    return f
9956 -class Rows(object):
9957 9958 """ 9959 A wrapper for the return value of a select. It basically represents a table. 9960 It has an iterator and each row is represented as a dictionary. 9961 """ 9962 9963 # ## TODO: this class still needs some work to care for ID/OID 9964
9965 - def __init__( 9966 self, 9967 db=None, 9968 records=[], 9969 colnames=[], 9970 compact=True, 9971 rawrows=None 9972 ):
9973 self.db = db 9974 self.records = records 9975 self.colnames = colnames 9976 self.compact = compact 9977 self.response = rawrows
9978
9979 - def __repr__(self):
9980 return '<Rows (%s)>' % len(self.records)
9981
9982 - def setvirtualfields(self,**keyed_virtualfields):
9983 """ 9984 db.define_table('x',Field('number','integer')) 9985 if db(db.x).isempty(): [db.x.insert(number=i) for i in range(10)] 9986 9987 from gluon.dal import lazy_virtualfield 9988 9989 class MyVirtualFields(object): 9990 # normal virtual field (backward compatible, discouraged) 9991 def normal_shift(self): return self.x.number+1 9992 # lazy virtual field (because of @staticmethod) 9993 @lazy_virtualfield 9994 def lazy_shift(instance,row,delta=4): return row.x.number+delta 9995 db.x.virtualfields.append(MyVirtualFields()) 9996 9997 for row in db(db.x).select(): 9998 print row.number, row.normal_shift, row.lazy_shift(delta=7) 9999 """ 10000 if not keyed_virtualfields: 10001 return self 10002 for row in self.records: 10003 for (tablename,virtualfields) in keyed_virtualfields.iteritems(): 10004 attributes = dir(virtualfields) 10005 if not tablename in row: 10006 box = row[tablename] = Row() 10007 else: 10008 box = row[tablename] 10009 updated = False 10010 for attribute in attributes: 10011 if attribute[0] != '_': 10012 method = getattr(virtualfields,attribute) 10013 if hasattr(method,'__lazy__'): 10014 box[attribute]=VirtualCommand(method,row) 10015 elif type(method)==types.MethodType: 10016 if not updated: 10017 virtualfields.__dict__.update(row) 10018 updated = True 10019 box[attribute]=method() 10020 return self
10021
10022 - def __and__(self,other):
10023 if self.colnames!=other.colnames: 10024 raise Exception('Cannot & incompatible Rows objects') 10025 records = self.records+other.records 10026 return Rows(self.db,records,self.colnames)
10027
10028 - def __or__(self,other):
10029 if self.colnames!=other.colnames: 10030 raise Exception('Cannot | incompatible Rows objects') 10031 records = self.records 10032 records += [record for record in other.records \ 10033 if not record in records] 10034 return Rows(self.db,records,self.colnames)
10035
10036 - def __nonzero__(self):
10037 if len(self.records): 10038 return 1 10039 return 0
10040
10041 - def __len__(self):
10042 return len(self.records)
10043
10044 - def __getslice__(self, a, b):
10045 return Rows(self.db,self.records[a:b],self.colnames)
10046
10047 - def __getitem__(self, i):
10048 row = self.records[i] 10049 keys = row.keys() 10050 if self.compact and len(keys) == 1 and keys[0] != '_extra': 10051 return row[row.keys()[0]] 10052 return row
10053
10054 - def __iter__(self):
10055 """ 10056 iterator over records 10057 """ 10058 10059 for i in xrange(len(self)): 10060 yield self[i]
10061
10062 - def __str__(self):
10063 """ 10064 serializes the table into a csv file 10065 """ 10066 10067 s = StringIO.StringIO() 10068 self.export_to_csv_file(s) 10069 return s.getvalue()
10070
10071 - def first(self):
10072 if not self.records: 10073 return None 10074 return self[0]
10075
10076 - def last(self):
10077 if not self.records: 10078 return None 10079 return self[-1]
10080
10081 - def find(self,f,limitby=None):
10082 """ 10083 returns a new Rows object, a subset of the original object, 10084 filtered by the function f 10085 """ 10086 if not self: 10087 return Rows(self.db, [], self.colnames) 10088 records = [] 10089 if limitby: 10090 a,b = limitby 10091 else: 10092 a,b = 0,len(self) 10093 k = 0 10094 for row in self: 10095 if f(row): 10096 if a<=k: records.append(row) 10097 k += 1 10098 if k==b: break 10099 return Rows(self.db, records, self.colnames)
10100
10101 - def exclude(self, f):
10102 """ 10103 removes elements from the calling Rows object, filtered by the function f, 10104 and returns a new Rows object containing the removed elements 10105 """ 10106 if not self.records: 10107 return Rows(self.db, [], self.colnames) 10108 removed = [] 10109 i=0 10110 while i<len(self): 10111 row = self[i] 10112 if f(row): 10113 removed.append(self.records[i]) 10114 del self.records[i] 10115 else: 10116 i += 1 10117 return Rows(self.db, removed, self.colnames)
10118
10119 - def sort(self, f, reverse=False):
10120 """ 10121 returns a list of sorted elements (not sorted in place) 10122 """ 10123 rows = Rows(self.db,[],self.colnames,compact=False) 10124 rows.records = sorted(self,key=f,reverse=reverse) 10125 return rows
10126 10127
10128 - def group_by_value(self, field):
10129 """ 10130 regroups the rows, by one of the fields 10131 """ 10132 if not self.records: 10133 return {} 10134 key = str(field) 10135 grouped_row_group = dict() 10136 10137 for row in self: 10138 value = row[key] 10139 if not value in grouped_row_group: 10140 grouped_row_group[value] = [row] 10141 else: 10142 grouped_row_group[value].append(row) 10143 return grouped_row_group
10144
10145 - def as_list(self, 10146 compact=True, 10147 storage_to_dict=True, 10148 datetime_to_str=True, 10149 custom_types=None):
10150 """ 10151 returns the data as a list or dictionary. 10152 :param storage_to_dict: when True returns a dict, otherwise a list(default True) 10153 :param datetime_to_str: convert datetime fields as strings (default True) 10154 """ 10155 (oc, self.compact) = (self.compact, compact) 10156 if storage_to_dict: 10157 items = [item.as_dict(datetime_to_str, custom_types) for item in self] 10158 else: 10159 items = [item for item in self] 10160 self.compact = compact 10161 return items
10162 10163
10164 - def as_dict(self, 10165 key='id', 10166 compact=True, 10167 storage_to_dict=True, 10168 datetime_to_str=True, 10169 custom_types=None):
10170 """ 10171 returns the data as a dictionary of dictionaries (storage_to_dict=True) or records (False) 10172 10173 :param key: the name of the field to be used as dict key, normally the id 10174 :param compact: ? (default True) 10175 :param storage_to_dict: when True returns a dict, otherwise a list(default True) 10176 :param datetime_to_str: convert datetime fields as strings (default True) 10177 """ 10178 10179 # test for multiple rows 10180 multi = False 10181 f = self.first() 10182 if f: 10183 multi = any([isinstance(v, f.__class__) for v in f.values()]) 10184 if (not "." in key) and multi: 10185 # No key provided, default to int indices 10186 def new_key(): 10187 i = 0 10188 while True: 10189 yield i 10190 i += 1
10191 key_generator = new_key() 10192 key = lambda r: key_generator.next() 10193 10194 rows = self.as_list(compact, storage_to_dict, datetime_to_str, custom_types) 10195 if isinstance(key,str) and key.count('.')==1: 10196 (table, field) = key.split('.') 10197 return dict([(r[table][field],r) for r in rows]) 10198 elif isinstance(key,str): 10199 return dict([(r[key],r) for r in rows]) 10200 else: 10201 return dict([(key(r),r) for r in rows])
10202
10203 - def export_to_csv_file(self, ofile, null='<NULL>', *args, **kwargs):
10204 """ 10205 export data to csv, the first line contains the column names 10206 10207 :param ofile: where the csv must be exported to 10208 :param null: how null values must be represented (default '<NULL>') 10209 :param delimiter: delimiter to separate values (default ',') 10210 :param quotechar: character to use to quote string values (default '"') 10211 :param quoting: quote system, use csv.QUOTE_*** (default csv.QUOTE_MINIMAL) 10212 :param represent: use the fields .represent value (default False) 10213 :param colnames: list of column names to use (default self.colnames) 10214 This will only work when exporting rows objects!!!! 10215 DO NOT use this with db.export_to_csv() 10216 """ 10217 delimiter = kwargs.get('delimiter', ',') 10218 quotechar = kwargs.get('quotechar', '"') 10219 quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL) 10220 represent = kwargs.get('represent', False) 10221 writer = csv.writer(ofile, delimiter=delimiter, 10222 quotechar=quotechar, quoting=quoting) 10223 colnames = kwargs.get('colnames', self.colnames) 10224 write_colnames = kwargs.get('write_colnames',True) 10225 # a proper csv starting with the column names 10226 if write_colnames: 10227 writer.writerow(colnames) 10228 10229 def none_exception(value): 10230 """ 10231 returns a cleaned up value that can be used for csv export: 10232 - unicode text is encoded as such 10233 - None values are replaced with the given representation (default <NULL>) 10234 """ 10235 if value is None: 10236 return null 10237 elif isinstance(value, unicode): 10238 return value.encode('utf8') 10239 elif isinstance(value,Reference): 10240 return int(value) 10241 elif hasattr(value, 'isoformat'): 10242 return value.isoformat()[:19].replace('T', ' ') 10243 elif isinstance(value, (list,tuple)): # for type='list:..' 10244 return bar_encode(value) 10245 return value
10246 10247 for record in self: 10248 row = [] 10249 for col in colnames: 10250 if not REGEX_TABLE_DOT_FIELD.match(col): 10251 row.append(record._extra[col]) 10252 else: 10253 (t, f) = col.split('.') 10254 field = self.db[t][f] 10255 if isinstance(record.get(t, None), (Row,dict)): 10256 value = record[t][f] 10257 else: 10258 value = record[f] 10259 if field.type=='blob' and not value is None: 10260 value = base64.b64encode(value) 10261 elif represent and field.represent: 10262 value = field.represent(value) 10263 row.append(none_exception(value)) 10264 writer.writerow(row) 10265
def xml(self, strict=False, row_name='row', rows_name='rows'):
    """
    serializes the table as XML

    :param strict: when True, build the XML directly from each record's
        as_xml(); when False, delegate to sqlhtml.SQLTABLE (if present)
    :param row_name: tag name used for each record element (default 'row')
    :param rows_name: tag name used for the enclosing element (default 'rows')
    """
    if strict:
        # wrap every record's own XML serialization inside <rows_name>;
        # (the original also computed len(self.colnames) into an unused
        # local `ncols` -- dead code, removed)
        return '<%s>\n%s\n</%s>' % (
            rows_name,
            '\n'.join(row.as_xml(row_name=row_name,
                                 colnames=self.colnames) for row in self),
            rows_name)
    import sqlhtml
    return sqlhtml.SQLTABLE(self).xml()
10280
def as_xml(self, row_name='row', rows_name='rows'):
    """Serialize in strict XML form; convenience wrapper over xml(strict=True)."""
    return self.xml(row_name=row_name, rows_name=rows_name, strict=True)
10283
def as_json(self, mode='object', default=None):
    """
    serializes the table to a JSON list of objects
    """
    items = [rec.as_json(mode=mode, default=default,
                         serialize=False, colnames=self.colnames)
             for rec in self]
    # prefer the framework serializers; otherwise fall back to simplejson
    if have_serializers:
        return serializers.json(items,
                                default=default or serializers.custom_json)
    if simplejson:
        return simplejson.dumps(items)
    raise RuntimeError("missing simplejson")
10302 10303 # for consistent naming yet backwards compatible 10304 as_csv = __str__ 10305 json = as_json 10306
################################################################################
# dummy function used to define some doctests
################################################################################

def test_all():
    """
    Dummy function: its docstring carries the doctest suite for the DAL
    (run via doctest.testmod() when this module is executed as a script).

    >>> if len(sys.argv)<2: db = DAL(\"sqlite://test.db\")
    >>> if len(sys.argv)>1: db = DAL(sys.argv[1])
    >>> tmp = db.define_table('users',\
              Field('stringf', 'string', length=32, required=True),\
              Field('booleanf', 'boolean', default=False),\
              Field('passwordf', 'password', notnull=True),\
              Field('uploadf', 'upload'),\
              Field('blobf', 'blob'),\
              Field('integerf', 'integer', unique=True),\
              Field('doublef', 'double', unique=True,notnull=True),\
              Field('jsonf', 'json'),\
              Field('datef', 'date', default=datetime.date.today()),\
              Field('timef', 'time'),\
              Field('datetimef', 'datetime'),\
              migrate='test_user.table')

    Insert a field

    >>> db.users.insert(stringf='a', booleanf=True, passwordf='p', blobf='0A',\
                        uploadf=None, integerf=5, doublef=3.14,\
                        jsonf={"j": True},\
                        datef=datetime.date(2001, 1, 1),\
                        timef=datetime.time(12, 30, 15),\
                        datetimef=datetime.datetime(2002, 2, 2, 12, 30, 15))
    1

    Drop the table

    >>> db.users.drop()

    Examples of insert, select, update, delete

    >>> tmp = db.define_table('person',\
              Field('name'),\
              Field('birth','date'),\
              migrate='test_person.table')
    >>> person_id = db.person.insert(name=\"Marco\",birth='2005-06-22')
    >>> person_id = db.person.insert(name=\"Massimo\",birth='1971-12-21')

    commented len(db().select(db.person.ALL))
    commented 2

    >>> me = db(db.person.id==person_id).select()[0] # test select
    >>> me.name
    'Massimo'
    >>> db.person[2].name
    'Massimo'
    >>> db.person(2).name
    'Massimo'
    >>> db.person(name='Massimo').name
    'Massimo'
    >>> db.person(db.person.name=='Massimo').name
    'Massimo'
    >>> row = db.person[2]
    >>> row.name == row['name'] == row['person.name'] == row('person.name')
    True
    >>> db(db.person.name=='Massimo').update(name='massimo') # test update
    1
    >>> db(db.person.name=='Marco').select().first().delete_record() # test delete
    1

    Update a single record

    >>> me.update_record(name=\"Max\")
    <Row {'name': 'Max', 'birth': datetime.date(1971, 12, 21), 'id': 2}>
    >>> me.name
    'Max'

    Examples of complex search conditions

    >>> len(db((db.person.name=='Max')&(db.person.birth<'2003-01-01')).select())
    1
    >>> len(db((db.person.name=='Max')&(db.person.birth<datetime.date(2003,01,01))).select())
    1
    >>> len(db((db.person.name=='Max')|(db.person.birth<'2003-01-01')).select())
    1
    >>> me = db(db.person.id==person_id).select(db.person.name)[0]
    >>> me.name
    'Max'

    Examples of search conditions using extract from date/datetime/time

    >>> len(db(db.person.birth.month()==12).select())
    1
    >>> len(db(db.person.birth.year()>1900).select())
    1

    Example of usage of NULL

    >>> len(db(db.person.birth==None).select()) ### test NULL
    0
    >>> len(db(db.person.birth!=None).select()) ### test NULL
    1

    Examples of search conditions using lower, upper, and like

    >>> len(db(db.person.name.upper()=='MAX').select())
    1
    >>> len(db(db.person.name.like('%ax')).select())
    1
    >>> len(db(db.person.name.upper().like('%AX')).select())
    1
    >>> len(db(~db.person.name.upper().like('%AX')).select())
    0

    orderby, groupby and limitby

    >>> people = db().select(db.person.name, orderby=db.person.name)
    >>> order = db.person.name|~db.person.birth
    >>> people = db().select(db.person.name, orderby=order)

    >>> people = db().select(db.person.name, orderby=db.person.name, groupby=db.person.name)

    >>> people = db().select(db.person.name, orderby=order, limitby=(0,100))

    Example of one 2 many relation

    >>> tmp = db.define_table('dog',\
              Field('name'),\
              Field('birth','date'),\
              Field('owner',db.person),\
              migrate='test_dog.table')
    >>> db.dog.insert(name='Snoopy', birth=None, owner=person_id)
    1

    A simple JOIN

    >>> len(db(db.dog.owner==db.person.id).select())
    1

    >>> len(db().select(db.person.ALL, db.dog.name,left=db.dog.on(db.dog.owner==db.person.id)))
    1

    Drop tables

    >>> db.dog.drop()
    >>> db.person.drop()

    Example of many 2 many relation and Set

    >>> tmp = db.define_table('author', Field('name'),\
              migrate='test_author.table')
    >>> tmp = db.define_table('paper', Field('title'),\
              migrate='test_paper.table')
    >>> tmp = db.define_table('authorship',\
              Field('author_id', db.author),\
              Field('paper_id', db.paper),\
              migrate='test_authorship.table')
    >>> aid = db.author.insert(name='Massimo')
    >>> pid = db.paper.insert(title='QCD')
    >>> tmp = db.authorship.insert(author_id=aid, paper_id=pid)

    Define a Set

    >>> authored_papers = db((db.author.id==db.authorship.author_id)&(db.paper.id==db.authorship.paper_id))
    >>> rows = authored_papers.select(db.author.name, db.paper.title)
    >>> for row in rows: print row.author.name, row.paper.title
    Massimo QCD

    Example of search condition using belongs

    >>> set = (1, 2, 3)
    >>> rows = db(db.paper.id.belongs(set)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of search condition using nested select

    >>> nested_select = db()._select(db.authorship.paper_id)
    >>> rows = db(db.paper.id.belongs(nested_select)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of expressions

    >>> mynumber = db.define_table('mynumber', Field('x', 'integer'))
    >>> db(mynumber).delete()
    0
    >>> for i in range(10): tmp = mynumber.insert(x=i)
    >>> db(mynumber).select(mynumber.x.sum())[0](mynumber.x.sum())
    45

    >>> db(mynumber.x+2==5).select(mynumber.x + 2)[0](mynumber.x + 2)
    5

    Output in csv

    >>> print str(authored_papers.select(db.author.name, db.paper.title)).strip()
    author.name,paper.title\r
    Massimo,QCD

    Delete all leftover tables

    >>> DAL.distributed_transaction_commit(db)

    >>> db.mynumber.drop()
    >>> db.authorship.drop()
    >>> db.author.drop()
    >>> db.paper.drop()
    """
10514 ################################################################################ 10515 # deprecated since the new DAL; here only for backward compatibility 10516 ################################################################################ 10517 10518 SQLField = Field 10519 SQLTable = Table 10520 SQLXorable = Expression 10521 SQLQuery = Query 10522 SQLSet = Set 10523 SQLRows = Rows 10524 SQLStorage = Row 10525 SQLDB = DAL 10526 GQLDB = DAL 10527 DAL.Field = Field # was necessary in gluon/globals.py session.connect 10528 DAL.Table = Table # was necessary in gluon/globals.py session.connect
################################################################################
# Geodal utils
################################################################################

def geoPoint(x, y):
    """Return the WKT (Well-Known Text) string for a single 2D point."""
    # {:f} renders fixed-point with 6 decimals, identical to the %f formatting
    return "POINT ({0:f} {1:f})".format(x, y)
10536
def geoLine(*line):
    """Return the WKT LINESTRING string for the given (x, y) coordinate pairs."""
    coords = ("%f %f" % pair for pair in line)
    return "LINESTRING (%s)" % ",".join(coords)
10539
def geoPolygon(*line):
    """Return the WKT POLYGON string (one outer ring) for the given (x, y) pairs."""
    vertices = ["%f %f" % pair for pair in line]
    return "POLYGON ((%s))" % ",".join(vertices)
10542 10543 ################################################################################ 10544 # run tests 10545 ################################################################################ 10546 10547 if __name__ == '__main__': 10548 import doctest 10549 doctest.testmod() 10550